1 /* bnx2x_sp.c: Qlogic Everest network driver.
2  *
3  * Copyright 2011-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * Unless you and Qlogic execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2, available
10  * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
11  *
12  * Notwithstanding the above, under no circumstances may you combine this
13  * software in any way with any other Qlogic software provided under a
14  * license other than the GPL, without Qlogic's express prior written
15  * consent.
16  *
17  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
18  * Written by: Vladislav Zolotarov
19  *
20  */
21 
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 
24 #include <linux/module.h>
25 #include <linux/crc32.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/crc32c.h>
29 #include "bnx2x.h"
30 #include "bnx2x_cmn.h"
31 #include "bnx2x_sp.h"
32 
33 #define BNX2X_MAX_EMUL_MULTI		16
34 
35 /**** Exe Queue interfaces ****/
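/* The execution queue below batches classification commands: clients add
 * elements with bnx2x_exe_queue_add(), and chunks of commands whose total
 * length does not exceed exe_chunk_len are later handed to the owner's
 * execute() callback by bnx2x_exe_queue_step(). Elements whose ramrod is
 * in flight sit on the pending_comp list until the completion arrives.
 */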
36 
37 /**
38  * bnx2x_exe_queue_init - init the Exe Queue object
39  *
 * @bp:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length of the execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
47  */
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 					struct bnx2x_exe_queue_obj *o,
50 					int exe_len,
51 					union bnx2x_qable_obj *owner,
52 					exe_q_validate validate,
53 					exe_q_remove remove,
54 					exe_q_optimize optimize,
55 					exe_q_execute exec,
56 					exe_q_get get)
57 {
58 	memset(o, 0, sizeof(*o));
59 
60 	INIT_LIST_HEAD(&o->exe_queue);
61 	INIT_LIST_HEAD(&o->pending_comp);
62 
63 	spin_lock_init(&o->lock);
64 
65 	o->exe_chunk_len = exe_len;
66 	o->owner         = owner;
67 
68 	/* Owner specific callbacks */
69 	o->validate      = validate;
70 	o->remove        = remove;
71 	o->optimize      = optimize;
72 	o->execute       = exec;
73 	o->get           = get;
74 
75 	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76 	   exe_len);
77 }
78 
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80 					     struct bnx2x_exeq_elem *elem)
81 {
82 	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
83 	kfree(elem);
84 }
85 
86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87 {
88 	struct bnx2x_exeq_elem *elem;
89 	int cnt = 0;
90 
91 	spin_lock_bh(&o->lock);
92 
93 	list_for_each_entry(elem, &o->exe_queue, link)
94 		cnt++;
95 
96 	spin_unlock_bh(&o->lock);
97 
98 	return cnt;
99 }
100 
101 /**
102  * bnx2x_exe_queue_add - add a new element to the execution queue
103  *
104  * @bp:		driver handle
105  * @o:		queue
 * @elem:	new command element to add
107  * @restore:	true - do not optimize the command
108  *
109  * If the element is optimized or is illegal, frees it.
110  */
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112 				      struct bnx2x_exe_queue_obj *o,
113 				      struct bnx2x_exeq_elem *elem,
114 				      bool restore)
115 {
116 	int rc;
117 
118 	spin_lock_bh(&o->lock);
119 
120 	if (!restore) {
		/* Try to optimize this command away against an opposite command
		 * already pending in the queue.
		 */
122 		rc = o->optimize(bp, o->owner, elem);
123 		if (rc)
124 			goto free_and_exit;
125 
126 		/* Check if this request is ok */
127 		rc = o->validate(bp, o->owner, elem);
128 		if (rc) {
129 			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
130 			goto free_and_exit;
131 		}
132 	}
133 
134 	/* If so, add it to the execution queue */
135 	list_add_tail(&elem->link, &o->exe_queue);
136 
137 	spin_unlock_bh(&o->lock);
138 
139 	return 0;
140 
141 free_and_exit:
142 	bnx2x_exe_queue_free_elem(bp, elem);
143 
144 	spin_unlock_bh(&o->lock);
145 
146 	return rc;
147 }
148 
149 static inline void __bnx2x_exe_queue_reset_pending(
150 	struct bnx2x *bp,
151 	struct bnx2x_exe_queue_obj *o)
152 {
153 	struct bnx2x_exeq_elem *elem;
154 
155 	while (!list_empty(&o->pending_comp)) {
156 		elem = list_first_entry(&o->pending_comp,
157 					struct bnx2x_exeq_elem, link);
158 
159 		list_del(&elem->link);
160 		bnx2x_exe_queue_free_elem(bp, elem);
161 	}
162 }
163 
164 /**
165  * bnx2x_exe_queue_step - execute one execution chunk atomically
166  *
167  * @bp:			driver handle
168  * @o:			queue
169  * @ramrod_flags:	flags
170  *
171  * (Should be called while holding the exe_queue->lock).
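 *
 * Return: a negative value on failure, a positive value if completions are
 * still pending, 0 otherwise.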
172  */
173 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
174 				       struct bnx2x_exe_queue_obj *o,
175 				       unsigned long *ramrod_flags)
176 {
177 	struct bnx2x_exeq_elem *elem, spacer;
178 	int cur_len = 0, rc;
179 
180 	memset(&spacer, 0, sizeof(spacer));
181 
182 	/* Next step should not be performed until the current is finished,
183 	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
184 	 * properly clear object internals without sending any command to the FW
185 	 * which also implies there won't be any completion to clear the
186 	 * 'pending' list.
187 	 */
188 	if (!list_empty(&o->pending_comp)) {
189 		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
190 			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
191 			__bnx2x_exe_queue_reset_pending(bp, o);
192 		} else {
193 			return 1;
194 		}
195 	}
196 
197 	/* Run through the pending commands list and create a next
198 	 * execution chunk.
199 	 */
200 	while (!list_empty(&o->exe_queue)) {
201 		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
202 					link);
203 		WARN_ON(!elem->cmd_len);
204 
205 		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
206 			cur_len += elem->cmd_len;
207 			/* Prevent from both lists being empty when moving an
208 			 * element. This will allow the call of
209 			 * bnx2x_exe_queue_empty() without locking.
210 			 */
211 			list_add_tail(&spacer.link, &o->pending_comp);
212 			mb();
213 			list_move_tail(&elem->link, &o->pending_comp);
214 			list_del(&spacer.link);
215 		} else
216 			break;
217 	}
218 
219 	/* Sanity check */
220 	if (!cur_len)
221 		return 0;
222 
223 	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
224 	if (rc < 0)
225 		/* In case of an error return the commands back to the queue
226 		 * and reset the pending_comp.
227 		 */
228 		list_splice_init(&o->pending_comp, &o->exe_queue);
229 	else if (!rc)
230 		/* If zero is returned, means there are no outstanding pending
231 		 * completions and we may dismiss the pending list.
232 		 */
233 		__bnx2x_exe_queue_reset_pending(bp, o);
234 
235 	return rc;
236 }
237 
238 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
239 {
240 	bool empty = list_empty(&o->exe_queue);
241 
	/* Don't reorder!!! The barrier keeps the two list_empty() reads in
	 * order; together with the spacer element added in
	 * bnx2x_exe_queue_step() this keeps the lockless check consistent.
	 */
243 	mb();
244 
245 	return empty && list_empty(&o->pending_comp);
246 }
247 
248 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
249 	struct bnx2x *bp)
250 {
251 	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
252 	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
253 }
254 
255 /************************ raw_obj functions ***********************************/
256 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
257 {
258 	return !!test_bit(o->state, o->pstate);
259 }
260 
261 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
262 {
263 	smp_mb__before_atomic();
264 	clear_bit(o->state, o->pstate);
265 	smp_mb__after_atomic();
266 }
267 
268 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
269 {
270 	smp_mb__before_atomic();
271 	set_bit(o->state, o->pstate);
272 	smp_mb__after_atomic();
273 }
274 
275 /**
276  * bnx2x_state_wait - wait until the given bit(state) is cleared
277  *
278  * @bp:		device handle
279  * @state:	state which is to be cleared
 * @pstate:	pointer to the state buffer
281  *
282  */
283 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
284 				   unsigned long *pstate)
285 {
286 	/* can take a while if any port is running */
287 	int cnt = 5000;
288 
289 	if (CHIP_REV_IS_EMUL(bp))
290 		cnt *= 20;
291 
292 	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
293 
294 	might_sleep();
295 	while (cnt--) {
296 		if (!test_bit(state, pstate)) {
297 #ifdef BNX2X_STOP_ON_ERROR
298 			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
299 #endif
300 			return 0;
301 		}
302 
303 		usleep_range(1000, 2000);
304 
305 		if (bp->panic)
306 			return -EIO;
307 	}
308 
309 	/* timeout! */
310 	BNX2X_ERR("timeout waiting for state %d\n", state);
311 #ifdef BNX2X_STOP_ON_ERROR
312 	bnx2x_panic();
313 #endif
314 
315 	return -EBUSY;
316 }
317 
318 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
319 {
320 	return bnx2x_state_wait(bp, raw->state, raw->pstate);
321 }
322 
323 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
324 /* credit handling callbacks */
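/* These helpers are plugged into the vlan_mac object as its get/put_credit()
 * and get/put_cam_offset() callbacks; they forward to the MAC and VLAN
 * credit pools that account for the free CAM entries of this function.
 */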
325 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
326 {
327 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
328 
329 	WARN_ON(!mp);
330 
331 	return mp->get_entry(mp, offset);
332 }
333 
334 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
335 {
336 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
337 
338 	WARN_ON(!mp);
339 
340 	return mp->get(mp, 1);
341 }
342 
343 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
344 {
345 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
346 
347 	WARN_ON(!vp);
348 
349 	return vp->get_entry(vp, offset);
350 }
351 
352 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
353 {
354 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
355 
356 	WARN_ON(!vp);
357 
358 	return vp->get(vp, 1);
359 }
360 
361 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
362 {
363 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
364 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
365 
366 	if (!mp->get(mp, 1))
367 		return false;
368 
369 	if (!vp->get(vp, 1)) {
370 		mp->put(mp, 1);
371 		return false;
372 	}
373 
374 	return true;
375 }
376 
377 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
378 {
379 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
380 
381 	return mp->put_entry(mp, offset);
382 }
383 
384 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
385 {
386 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
387 
388 	return mp->put(mp, 1);
389 }
390 
391 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
392 {
393 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
394 
395 	return vp->put_entry(vp, offset);
396 }
397 
398 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
399 {
400 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
401 
402 	return vp->put(vp, 1);
403 }
404 
405 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
406 {
407 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
408 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
409 
410 	if (!mp->put(mp, 1))
411 		return false;
412 
413 	if (!vp->put(vp, 1)) {
414 		mp->get(mp, 1);
415 		return false;
416 	}
417 
418 	return true;
419 }
420 
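/* The vlan_mac object's "head" list is protected by a reader/writer scheme
 * built on top of the execution queue lock: readers bump o->head_reader,
 * while a writer (a ramrod execution step) may only run when no readers are
 * present. A step that could not run is recorded via o->head_exe_request and
 * is replayed when the last reader or the current writer releases the lock.
 */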
421 /**
422  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
423  *
424  * @bp:		device handle
425  * @o:		vlan_mac object
426  *
427  * @details: Non-blocking implementation; should be called under execution
428  *           queue lock.
429  */
430 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
431 					    struct bnx2x_vlan_mac_obj *o)
432 {
433 	if (o->head_reader) {
434 		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
435 		return -EBUSY;
436 	}
437 
438 	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
439 	return 0;
440 }
441 
442 /**
 * __bnx2x_vlan_mac_h_exec_pending - execute a previously pended step
444  *
445  * @bp:		device handle
446  * @o:		vlan_mac object
447  *
448  * @details Should be called under execution queue lock; notice it might release
449  *          and reclaim it during its run.
450  */
451 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
452 					    struct bnx2x_vlan_mac_obj *o)
453 {
454 	int rc;
455 	unsigned long ramrod_flags = o->saved_ramrod_flags;
456 
457 	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
458 	   ramrod_flags);
459 	o->head_exe_request = false;
460 	o->saved_ramrod_flags = 0;
461 	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
462 	if ((rc != 0) && (rc != 1)) {
463 		BNX2X_ERR("execution of pending commands failed with rc %d\n",
464 			  rc);
465 #ifdef BNX2X_STOP_ON_ERROR
466 		bnx2x_panic();
467 #endif
468 	}
469 }
470 
471 /**
472  * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
473  *
474  * @bp:			device handle
475  * @o:			vlan_mac object
476  * @ramrod_flags:	ramrod flags of missed execution
477  *
478  * @details Should be called under execution queue lock.
479  */
480 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
481 				    struct bnx2x_vlan_mac_obj *o,
482 				    unsigned long ramrod_flags)
483 {
484 	o->head_exe_request = true;
485 	o->saved_ramrod_flags = ramrod_flags;
486 	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
487 	   ramrod_flags);
488 }
489 
490 /**
491  * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
492  *
493  * @bp:			device handle
494  * @o:			vlan_mac object
495  *
496  * @details Should be called under execution queue lock. Notice if a pending
497  *          execution exists, it would perform it - possibly releasing and
498  *          reclaiming the execution queue lock.
499  */
500 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
501 					    struct bnx2x_vlan_mac_obj *o)
502 {
503 	/* It's possible a new pending execution was added since this writer
504 	 * executed. If so, execute again. [Ad infinitum]
505 	 */
506 	while (o->head_exe_request) {
507 		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
508 		__bnx2x_vlan_mac_h_exec_pending(bp, o);
509 	}
510 }
511 
512 
513 /**
514  * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
515  *
516  * @bp:			device handle
517  * @o:			vlan_mac object
518  *
519  * @details Should be called under the execution queue lock. May sleep. May
520  *          release and reclaim execution queue lock during its run.
521  */
522 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
523 					struct bnx2x_vlan_mac_obj *o)
524 {
525 	/* If we got here, we're holding lock --> no WRITER exists */
526 	o->head_reader++;
527 	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
528 	   o->head_reader);
529 
530 	return 0;
531 }
532 
533 /**
534  * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
535  *
536  * @bp:			device handle
537  * @o:			vlan_mac object
538  *
539  * @details May sleep. Claims and releases execution queue lock during its run.
540  */
541 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
542 			       struct bnx2x_vlan_mac_obj *o)
543 {
544 	int rc;
545 
546 	spin_lock_bh(&o->exe_queue.lock);
547 	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
548 	spin_unlock_bh(&o->exe_queue.lock);
549 
550 	return rc;
551 }
552 
553 /**
554  * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
555  *
556  * @bp:			device handle
557  * @o:			vlan_mac object
558  *
559  * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
562  */
563 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
564 					  struct bnx2x_vlan_mac_obj *o)
565 {
566 	if (!o->head_reader) {
567 		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
568 #ifdef BNX2X_STOP_ON_ERROR
569 		bnx2x_panic();
570 #endif
571 	} else {
572 		o->head_reader--;
573 		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
574 		   o->head_reader);
575 	}
576 
577 	/* It's possible a new pending execution was added, and that this reader
578 	 * was last - if so we need to execute the command.
579 	 */
580 	if (!o->head_reader && o->head_exe_request) {
581 		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
582 
583 		/* Writer release will do the trick */
584 		__bnx2x_vlan_mac_h_write_unlock(bp, o);
585 	}
586 }
587 
588 /**
589  * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
590  *
591  * @bp:			device handle
592  * @o:			vlan_mac object
593  *
594  * @details Notice if a pending execution exists, it would be performed if this
595  *          was the last reader. Claims and releases the execution queue lock
596  *          during its run.
597  */
598 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
599 				  struct bnx2x_vlan_mac_obj *o)
600 {
601 	spin_lock_bh(&o->exe_queue.lock);
602 	__bnx2x_vlan_mac_h_read_unlock(bp, o);
603 	spin_unlock_bh(&o->exe_queue.lock);
604 }
605 
606 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
607 				int n, u8 *base, u8 stride, u8 size)
608 {
609 	struct bnx2x_vlan_mac_registry_elem *pos;
610 	u8 *next = base;
611 	int counter = 0;
612 	int read_lock;
613 
614 	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
615 	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
616 	if (read_lock != 0)
617 		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
618 
619 	/* traverse list */
620 	list_for_each_entry(pos, &o->head, link) {
621 		if (counter < n) {
622 			memcpy(next, &pos->u, size);
623 			counter++;
624 			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
625 			   counter, next);
626 			next += stride + size;
627 		}
628 	}
629 
630 	if (read_lock == 0) {
631 		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
632 		bnx2x_vlan_mac_h_read_unlock(bp, o);
633 	}
634 
635 	return counter * ETH_ALEN;
636 }
637 
638 /* check_add() callbacks */
639 static int bnx2x_check_mac_add(struct bnx2x *bp,
640 			       struct bnx2x_vlan_mac_obj *o,
641 			       union bnx2x_classification_ramrod_data *data)
642 {
643 	struct bnx2x_vlan_mac_registry_elem *pos;
644 
645 	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
646 
647 	if (!is_valid_ether_addr(data->mac.mac))
648 		return -EINVAL;
649 
650 	/* Check if a requested MAC already exists */
651 	list_for_each_entry(pos, &o->head, link)
652 		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
653 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
654 			return -EEXIST;
655 
656 	return 0;
657 }
658 
659 static int bnx2x_check_vlan_add(struct bnx2x *bp,
660 				struct bnx2x_vlan_mac_obj *o,
661 				union bnx2x_classification_ramrod_data *data)
662 {
663 	struct bnx2x_vlan_mac_registry_elem *pos;
664 
665 	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
666 
667 	list_for_each_entry(pos, &o->head, link)
668 		if (data->vlan.vlan == pos->u.vlan.vlan)
669 			return -EEXIST;
670 
671 	return 0;
672 }
673 
674 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
675 				    struct bnx2x_vlan_mac_obj *o,
676 				   union bnx2x_classification_ramrod_data *data)
677 {
678 	struct bnx2x_vlan_mac_registry_elem *pos;
679 
680 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
681 	   data->vlan_mac.mac, data->vlan_mac.vlan);
682 
683 	list_for_each_entry(pos, &o->head, link)
684 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
685 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
686 				  ETH_ALEN)) &&
687 		    (data->vlan_mac.is_inner_mac ==
688 		     pos->u.vlan_mac.is_inner_mac))
689 			return -EEXIST;
690 
691 	return 0;
692 }
693 
694 /* check_del() callbacks */
695 static struct bnx2x_vlan_mac_registry_elem *
696 	bnx2x_check_mac_del(struct bnx2x *bp,
697 			    struct bnx2x_vlan_mac_obj *o,
698 			    union bnx2x_classification_ramrod_data *data)
699 {
700 	struct bnx2x_vlan_mac_registry_elem *pos;
701 
702 	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
703 
704 	list_for_each_entry(pos, &o->head, link)
705 		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
706 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
707 			return pos;
708 
709 	return NULL;
710 }
711 
712 static struct bnx2x_vlan_mac_registry_elem *
713 	bnx2x_check_vlan_del(struct bnx2x *bp,
714 			     struct bnx2x_vlan_mac_obj *o,
715 			     union bnx2x_classification_ramrod_data *data)
716 {
717 	struct bnx2x_vlan_mac_registry_elem *pos;
718 
719 	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
720 
721 	list_for_each_entry(pos, &o->head, link)
722 		if (data->vlan.vlan == pos->u.vlan.vlan)
723 			return pos;
724 
725 	return NULL;
726 }
727 
728 static struct bnx2x_vlan_mac_registry_elem *
729 	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
730 				 struct bnx2x_vlan_mac_obj *o,
731 				 union bnx2x_classification_ramrod_data *data)
732 {
733 	struct bnx2x_vlan_mac_registry_elem *pos;
734 
735 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
736 	   data->vlan_mac.mac, data->vlan_mac.vlan);
737 
738 	list_for_each_entry(pos, &o->head, link)
739 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
740 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
741 			     ETH_ALEN)) &&
742 		    (data->vlan_mac.is_inner_mac ==
743 		     pos->u.vlan_mac.is_inner_mac))
744 			return pos;
745 
746 	return NULL;
747 }
748 
749 /* check_move() callback */
750 static bool bnx2x_check_move(struct bnx2x *bp,
751 			     struct bnx2x_vlan_mac_obj *src_o,
752 			     struct bnx2x_vlan_mac_obj *dst_o,
753 			     union bnx2x_classification_ramrod_data *data)
754 {
755 	struct bnx2x_vlan_mac_registry_elem *pos;
756 	int rc;
757 
758 	/* Check if we can delete the requested configuration from the first
759 	 * object.
760 	 */
761 	pos = src_o->check_del(bp, src_o, data);
762 
763 	/*  check if configuration can be added */
764 	rc = dst_o->check_add(bp, dst_o, data);
765 
766 	/* If this classification can not be added (is already set)
767 	 * or can't be deleted - return an error.
768 	 */
769 	if (rc || !pos)
770 		return false;
771 
772 	return true;
773 }
774 
775 static bool bnx2x_check_move_always_err(
776 	struct bnx2x *bp,
777 	struct bnx2x_vlan_mac_obj *src_o,
778 	struct bnx2x_vlan_mac_obj *dst_o,
779 	union bnx2x_classification_ramrod_data *data)
780 {
781 	return false;
782 }
783 
784 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
785 {
786 	struct bnx2x_raw_obj *raw = &o->raw;
787 	u8 rx_tx_flag = 0;
788 
789 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
790 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
791 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
792 
793 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
794 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
795 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
796 
797 	return rx_tx_flag;
798 }
799 
800 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
801 				 bool add, unsigned char *dev_addr, int index)
802 {
803 	u32 wb_data[2];
804 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
805 			 NIG_REG_LLH0_FUNC_MEM;
806 
807 	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
808 		return;
809 
810 	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
811 		return;
812 
813 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
814 			 (add ? "ADD" : "DELETE"), index);
815 
816 	if (add) {
817 		/* LLH_FUNC_MEM is a u64 WB register */
818 		reg_offset += 8*index;
819 
820 		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
821 			      (dev_addr[4] <<  8) |  dev_addr[5]);
822 		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
823 
824 		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
825 	}
826 
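	/* Enable (on add) or disable (on delete) the matching LLH CAM entry */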
827 	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
828 				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
829 }
830 
831 /**
832  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
833  *
834  * @bp:		device handle
835  * @o:		queue for which we want to configure this rule
836  * @add:	if true the command is an ADD command, DEL otherwise
837  * @opcode:	CLASSIFY_RULE_OPCODE_XXX
838  * @hdr:	pointer to a header to setup
839  *
840  */
841 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
842 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
843 	struct eth_classify_cmd_header *hdr)
844 {
845 	struct bnx2x_raw_obj *raw = &o->raw;
846 
847 	hdr->client_id = raw->cl_id;
848 	hdr->func_id = raw->func_id;
849 
850 	/* Rx or/and Tx (internal switching) configuration ? */
851 	hdr->cmd_general_data |=
852 		bnx2x_vlan_mac_get_rx_tx_flag(o);
853 
854 	if (add)
855 		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
856 
857 	hdr->cmd_general_data |=
858 		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
859 }
860 
861 /**
862  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
863  *
864  * @cid:	connection id
865  * @type:	BNX2X_FILTER_XXX_PENDING
866  * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules configured in the ramrod data buffer
 *
 * Currently we always configure one rule; the echo field is set to contain
 * the CID and an opcode type.
871  */
872 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
873 				struct eth_classify_header *hdr, int rule_cnt)
874 {
875 	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
876 				(type << BNX2X_SWCID_SHIFT));
877 	hdr->rule_cnt = (u8)rule_cnt;
878 }
879 
880 /* hw_config() callbacks */
881 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
882 				 struct bnx2x_vlan_mac_obj *o,
883 				 struct bnx2x_exeq_elem *elem, int rule_idx,
884 				 int cam_offset)
885 {
886 	struct bnx2x_raw_obj *raw = &o->raw;
887 	struct eth_classify_rules_ramrod_data *data =
888 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
889 	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
890 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
891 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
892 	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
893 	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
894 
895 	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
896 	 * relevant. In addition, current implementation is tuned for a
897 	 * single ETH MAC.
898 	 *
	 * When a PF configuration with multiple unicast ETH MACs in
	 * switch-independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider making better use of the 8 per-function MAC
	 * entries in the LLH register. There are also the
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the total
	 * number of CAM entries to 16.
905 	 *
906 	 * Currently we won't configure NIG for MACs other than a primary ETH
907 	 * MAC and iSCSI L2 MAC.
908 	 *
909 	 * If this MAC is moving from one Queue to another, no need to change
910 	 * NIG configuration.
911 	 */
912 	if (cmd != BNX2X_VLAN_MAC_MOVE) {
913 		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
914 			bnx2x_set_mac_in_nig(bp, add, mac,
915 					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
916 		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
917 			bnx2x_set_mac_in_nig(bp, add, mac,
918 					     BNX2X_LLH_CAM_ETH_LINE);
919 	}
920 
921 	/* Reset the ramrod data buffer for the first rule */
922 	if (rule_idx == 0)
923 		memset(data, 0, sizeof(*data));
924 
925 	/* Setup a command header */
926 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
927 				      &rule_entry->mac.header);
928 
929 	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
930 	   (add ? "add" : "delete"), mac, raw->cl_id);
931 
932 	/* Set a MAC itself */
933 	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
934 			      &rule_entry->mac.mac_mid,
935 			      &rule_entry->mac.mac_lsb, mac);
936 	rule_entry->mac.inner_mac =
937 		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
938 
939 	/* MOVE: Add a rule that will add this MAC to the target Queue */
940 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
941 		rule_entry++;
942 		rule_cnt++;
943 
944 		/* Setup ramrod data */
945 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
946 					elem->cmd_data.vlan_mac.target_obj,
947 					      true, CLASSIFY_RULE_OPCODE_MAC,
948 					      &rule_entry->mac.header);
949 
950 		/* Set a MAC itself */
951 		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
952 				      &rule_entry->mac.mac_mid,
953 				      &rule_entry->mac.mac_lsb, mac);
954 		rule_entry->mac.inner_mac =
955 			cpu_to_le16(elem->cmd_data.vlan_mac.
956 						u.mac.is_inner_mac);
957 	}
958 
959 	/* Set the ramrod data header */
960 	/* TODO: take this to the higher level in order to prevent multiple
961 		 writing */
962 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
963 					rule_cnt);
964 }
965 
966 /**
967  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
968  *
969  * @bp:		device handle
970  * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
972  * @cam_offset:	offset in cam memory
973  * @hdr:	pointer to a header to setup
974  *
975  * E1/E1H
976  */
977 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
978 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
979 	struct mac_configuration_hdr *hdr)
980 {
981 	struct bnx2x_raw_obj *r = &o->raw;
982 
983 	hdr->length = 1;
984 	hdr->offset = (u8)cam_offset;
985 	hdr->client_id = cpu_to_le16(0xff);
986 	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
987 				(type << BNX2X_SWCID_SHIFT));
988 }
989 
990 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
991 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
992 	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
993 {
994 	struct bnx2x_raw_obj *r = &o->raw;
995 	u32 cl_bit_vec = (1 << r->cl_id);
996 
997 	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
998 	cfg_entry->pf_id = r->func_id;
999 	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
1000 
1001 	if (add) {
1002 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1003 			 T_ETH_MAC_COMMAND_SET);
1004 		SET_FLAG(cfg_entry->flags,
1005 			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
1006 
1007 		/* Set a MAC in a ramrod data */
1008 		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1009 				      &cfg_entry->middle_mac_addr,
1010 				      &cfg_entry->lsb_mac_addr, mac);
1011 	} else
1012 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1013 			 T_ETH_MAC_COMMAND_INVALIDATE);
1014 }
1015 
1016 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
1017 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
1018 	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
1019 {
1020 	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1021 	struct bnx2x_raw_obj *raw = &o->raw;
1022 
1023 	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
1024 					 &config->hdr);
1025 	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
1026 					 cfg_entry);
1027 
1028 	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
1029 			 (add ? "setting" : "clearing"),
1030 			 mac, raw->cl_id, cam_offset);
1031 }
1032 
1033 /**
1034  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
1035  *
1036  * @bp:		device handle
1037  * @o:		bnx2x_vlan_mac_obj
1038  * @elem:	bnx2x_exeq_elem
1039  * @rule_idx:	rule_idx
1040  * @cam_offset: cam_offset
1041  */
1042 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
1043 				  struct bnx2x_vlan_mac_obj *o,
1044 				  struct bnx2x_exeq_elem *elem, int rule_idx,
1045 				  int cam_offset)
1046 {
1047 	struct bnx2x_raw_obj *raw = &o->raw;
1048 	struct mac_configuration_cmd *config =
1049 		(struct mac_configuration_cmd *)(raw->rdata);
1050 	/* 57710 and 57711 do not support MOVE command,
1051 	 * so it's either ADD or DEL
1052 	 */
1053 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1054 		true : false;
1055 
1056 	/* Reset the ramrod data buffer */
1057 	memset(config, 0, sizeof(*config));
1058 
1059 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
1060 				     cam_offset, add,
1061 				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
1062 				     ETH_VLAN_FILTER_ANY_VLAN, config);
1063 }
1064 
1065 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
1066 				  struct bnx2x_vlan_mac_obj *o,
1067 				  struct bnx2x_exeq_elem *elem, int rule_idx,
1068 				  int cam_offset)
1069 {
1070 	struct bnx2x_raw_obj *raw = &o->raw;
1071 	struct eth_classify_rules_ramrod_data *data =
1072 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1073 	int rule_cnt = rule_idx + 1;
1074 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1075 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1076 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1077 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1078 
1079 	/* Reset the ramrod data buffer for the first rule */
1080 	if (rule_idx == 0)
1081 		memset(data, 0, sizeof(*data));
1082 
1083 	/* Set a rule header */
1084 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1085 				      &rule_entry->vlan.header);
1086 
1087 	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1088 			 vlan);
1089 
1090 	/* Set a VLAN itself */
1091 	rule_entry->vlan.vlan = cpu_to_le16(vlan);
1092 
	/* MOVE: Add a rule that will add this VLAN to the target Queue */
1094 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
1095 		rule_entry++;
1096 		rule_cnt++;
1097 
1098 		/* Setup ramrod data */
1099 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1100 					elem->cmd_data.vlan_mac.target_obj,
1101 					      true, CLASSIFY_RULE_OPCODE_VLAN,
1102 					      &rule_entry->vlan.header);
1103 
1104 		/* Set a VLAN itself */
1105 		rule_entry->vlan.vlan = cpu_to_le16(vlan);
1106 	}
1107 
1108 	/* Set the ramrod data header */
1109 	/* TODO: take this to the higher level in order to prevent multiple
1110 		 writing */
1111 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1112 					rule_cnt);
1113 }
1114 
1115 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
1116 				      struct bnx2x_vlan_mac_obj *o,
1117 				      struct bnx2x_exeq_elem *elem,
1118 				      int rule_idx, int cam_offset)
1119 {
1120 	struct bnx2x_raw_obj *raw = &o->raw;
1121 	struct eth_classify_rules_ramrod_data *data =
1122 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1123 	int rule_cnt = rule_idx + 1;
1124 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1125 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1126 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1127 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1128 	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1129 	u16 inner_mac;
1130 
1131 	/* Reset the ramrod data buffer for the first rule */
1132 	if (rule_idx == 0)
1133 		memset(data, 0, sizeof(*data));
1134 
1135 	/* Set a rule header */
1136 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1137 				      &rule_entry->pair.header);
1138 
1139 	/* Set VLAN and MAC themselves */
1140 	rule_entry->pair.vlan = cpu_to_le16(vlan);
1141 	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1142 			      &rule_entry->pair.mac_mid,
1143 			      &rule_entry->pair.mac_lsb, mac);
1144 	inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1145 	rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
1146 	/* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
1147 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
1148 		struct bnx2x_vlan_mac_obj *target_obj;
1149 
1150 		rule_entry++;
1151 		rule_cnt++;
1152 
1153 		/* Setup ramrod data */
1154 		target_obj = elem->cmd_data.vlan_mac.target_obj;
1155 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
1156 					      true, CLASSIFY_RULE_OPCODE_PAIR,
1157 					      &rule_entry->pair.header);
1158 
		/* Set VLAN and MAC themselves */
1160 		rule_entry->pair.vlan = cpu_to_le16(vlan);
1161 		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1162 				      &rule_entry->pair.mac_mid,
1163 				      &rule_entry->pair.mac_lsb, mac);
1164 		rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
1165 	}
1166 
1167 	/* Set the ramrod data header */
1168 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1169 					rule_cnt);
1170 }
1171 
1172 /**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
1174  *
1175  * @bp:		device handle
1176  * @o:		bnx2x_vlan_mac_obj
1177  * @elem:	bnx2x_exeq_elem
1178  * @rule_idx:	rule_idx
1179  * @cam_offset:	cam_offset
1180  */
1181 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1182 				       struct bnx2x_vlan_mac_obj *o,
1183 				       struct bnx2x_exeq_elem *elem,
1184 				       int rule_idx, int cam_offset)
1185 {
1186 	struct bnx2x_raw_obj *raw = &o->raw;
1187 	struct mac_configuration_cmd *config =
1188 		(struct mac_configuration_cmd *)(raw->rdata);
1189 	/* 57710 and 57711 do not support MOVE command,
1190 	 * so it's either ADD or DEL
1191 	 */
1192 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1193 		true : false;
1194 
1195 	/* Reset the ramrod data buffer */
1196 	memset(config, 0, sizeof(*config));
1197 
1198 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1199 				     cam_offset, add,
1200 				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1201 				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1202 				     ETH_VLAN_FILTER_CLASSIFY, config);
1203 }
1204 
1205 /**
1206  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1207  *
1208  * @bp:		device handle
1209  * @p:		command parameters
1210  * @ppos:	pointer to the cookie
1211  *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the previously
 * configured elements list.
 *
 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * @ppos is a pointer to the cookie that should be given back in the next
 * call to make the function handle the next element. If *ppos is set to
 * NULL, the iterator is restarted. If the returned *ppos == NULL, the last
 * element has been handled.
1222  *
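 * An illustrative restore loop (hypothetical caller, with RAMROD_COMP_WAIT
 * set in p.ramrod_flags) might look like:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos);
 *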
1223  */
1224 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1225 			   struct bnx2x_vlan_mac_ramrod_params *p,
1226 			   struct bnx2x_vlan_mac_registry_elem **ppos)
1227 {
1228 	struct bnx2x_vlan_mac_registry_elem *pos;
1229 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1230 
1231 	/* If list is empty - there is nothing to do here */
1232 	if (list_empty(&o->head)) {
1233 		*ppos = NULL;
1234 		return 0;
1235 	}
1236 
1237 	/* make a step... */
1238 	if (*ppos == NULL)
1239 		*ppos = list_first_entry(&o->head,
1240 					 struct bnx2x_vlan_mac_registry_elem,
1241 					 link);
1242 	else
1243 		*ppos = list_next_entry(*ppos, link);
1244 
1245 	pos = *ppos;
1246 
1247 	/* If it's the last step - return NULL */
1248 	if (list_is_last(&pos->link, &o->head))
1249 		*ppos = NULL;
1250 
1251 	/* Prepare a 'user_req' */
1252 	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1253 
1254 	/* Set the command */
1255 	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1256 
1257 	/* Set vlan_mac_flags */
1258 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1259 
1260 	/* Set a restore bit */
1261 	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1262 
1263 	return bnx2x_config_vlan_mac(bp, p);
1264 }
1265 
1266 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1267  * pointer to an element with a specific criteria and NULL if such an element
1268  * hasn't been found.
1269  */
1270 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1271 	struct bnx2x_exe_queue_obj *o,
1272 	struct bnx2x_exeq_elem *elem)
1273 {
1274 	struct bnx2x_exeq_elem *pos;
1275 	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1276 
1277 	/* Check pending for execution commands */
1278 	list_for_each_entry(pos, &o->exe_queue, link)
1279 		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1280 			      sizeof(*data)) &&
1281 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1282 			return pos;
1283 
1284 	return NULL;
1285 }
1286 
1287 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1288 	struct bnx2x_exe_queue_obj *o,
1289 	struct bnx2x_exeq_elem *elem)
1290 {
1291 	struct bnx2x_exeq_elem *pos;
1292 	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1293 
1294 	/* Check pending for execution commands */
1295 	list_for_each_entry(pos, &o->exe_queue, link)
1296 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1297 			      sizeof(*data)) &&
1298 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1299 			return pos;
1300 
1301 	return NULL;
1302 }
1303 
1304 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1305 	struct bnx2x_exe_queue_obj *o,
1306 	struct bnx2x_exeq_elem *elem)
1307 {
1308 	struct bnx2x_exeq_elem *pos;
1309 	struct bnx2x_vlan_mac_ramrod_data *data =
1310 		&elem->cmd_data.vlan_mac.u.vlan_mac;
1311 
1312 	/* Check pending for execution commands */
1313 	list_for_each_entry(pos, &o->exe_queue, link)
1314 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1315 			    sizeof(*data)) &&
1316 		    (pos->cmd_data.vlan_mac.cmd ==
1317 		     elem->cmd_data.vlan_mac.cmd))
1318 			return pos;
1319 
1320 	return NULL;
1321 }
1322 
1323 /**
1324  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1325  *
1326  * @bp:		device handle
1327  * @qo:		bnx2x_qable_obj
1328  * @elem:	bnx2x_exeq_elem
1329  *
1330  * Checks that the requested configuration can be added. If yes and if
1331  * requested, consume CAM credit.
1332  *
1333  * The 'validate' is run after the 'optimize'.
1334  *
1335  */
1336 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1337 					      union bnx2x_qable_obj *qo,
1338 					      struct bnx2x_exeq_elem *elem)
1339 {
1340 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1341 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1342 	int rc;
1343 
1344 	/* Check the registry */
1345 	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1346 	if (rc) {
1347 		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1348 		return rc;
1349 	}
1350 
1351 	/* Check if there is a pending ADD command for this
1352 	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1353 	 */
1354 	if (exeq->get(exeq, elem)) {
1355 		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1356 		return -EEXIST;
1357 	}
1358 
1359 	/* TODO: Check the pending MOVE from other objects where this
1360 	 * object is a destination object.
1361 	 */
1362 
1363 	/* Consume the credit if not requested not to */
1364 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1365 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1366 	    o->get_credit(o)))
1367 		return -EINVAL;
1368 
1369 	return 0;
1370 }
1371 
1372 /**
1373  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1374  *
1375  * @bp:		device handle
 * @qo:		qable object to check
1377  * @elem:	element that needs to be deleted
1378  *
1379  * Checks that the requested configuration can be deleted. If yes and if
1380  * requested, returns a CAM credit.
1381  *
1382  * The 'validate' is run after the 'optimize'.
1383  */
1384 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1385 					      union bnx2x_qable_obj *qo,
1386 					      struct bnx2x_exeq_elem *elem)
1387 {
1388 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1389 	struct bnx2x_vlan_mac_registry_elem *pos;
1390 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1391 	struct bnx2x_exeq_elem query_elem;
1392 
	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
1395 	 */
1396 	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1397 	if (!pos) {
1398 		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1399 		return -EEXIST;
1400 	}
1401 
1402 	/* Check if there are pending DEL or MOVE commands for this
1403 	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1404 	 */
1405 	memcpy(&query_elem, elem, sizeof(query_elem));
1406 
1407 	/* Check for MOVE commands */
1408 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1409 	if (exeq->get(exeq, &query_elem)) {
1410 		BNX2X_ERR("There is a pending MOVE command already\n");
1411 		return -EINVAL;
1412 	}
1413 
1414 	/* Check for DEL commands */
1415 	if (exeq->get(exeq, elem)) {
1416 		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1417 		return -EEXIST;
1418 	}
1419 
1420 	/* Return the credit to the credit pool if not requested not to */
1421 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1422 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1423 	    o->put_credit(o))) {
1424 		BNX2X_ERR("Failed to return a credit\n");
1425 		return -EINVAL;
1426 	}
1427 
1428 	return 0;
1429 }
1430 
1431 /**
1432  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1433  *
1434  * @bp:		device handle
 * @qo:		qable object to check (source)
1436  * @elem:	element that needs to be moved
1437  *
1438  * Checks that the requested configuration can be moved. If yes and if
1439  * requested, returns a CAM credit.
1440  *
1441  * The 'validate' is run after the 'optimize'.
1442  */
1443 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1444 					       union bnx2x_qable_obj *qo,
1445 					       struct bnx2x_exeq_elem *elem)
1446 {
1447 	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1448 	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1449 	struct bnx2x_exeq_elem query_elem;
1450 	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1451 	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1452 
1453 	/* Check if we can perform this operation based on the current registry
1454 	 * state.
1455 	 */
1456 	if (!src_o->check_move(bp, src_o, dest_o,
1457 			       &elem->cmd_data.vlan_mac.u)) {
1458 		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1459 		return -EINVAL;
1460 	}
1461 
1462 	/* Check if there is an already pending DEL or MOVE command for the
1463 	 * source object or ADD command for a destination object. Return an
1464 	 * error if so.
1465 	 */
1466 	memcpy(&query_elem, elem, sizeof(query_elem));
1467 
1468 	/* Check DEL on source */
1469 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1470 	if (src_exeq->get(src_exeq, &query_elem)) {
1471 		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1472 		return -EINVAL;
1473 	}
1474 
1475 	/* Check MOVE on source */
1476 	if (src_exeq->get(src_exeq, elem)) {
1477 		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1478 		return -EEXIST;
1479 	}
1480 
1481 	/* Check ADD on destination */
1482 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1483 	if (dest_exeq->get(dest_exeq, &query_elem)) {
1484 		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1485 		return -EINVAL;
1486 	}
1487 
1488 	/* Consume the credit if not requested not to */
1489 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1490 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1491 	    dest_o->get_credit(dest_o)))
1492 		return -EINVAL;
1493 
1494 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1495 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1496 	    src_o->put_credit(src_o))) {
1497 		/* return the credit taken from dest... */
1498 		dest_o->put_credit(dest_o);
1499 		return -EINVAL;
1500 	}
1501 
1502 	return 0;
1503 }
1504 
1505 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1506 				   union bnx2x_qable_obj *qo,
1507 				   struct bnx2x_exeq_elem *elem)
1508 {
1509 	switch (elem->cmd_data.vlan_mac.cmd) {
1510 	case BNX2X_VLAN_MAC_ADD:
1511 		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1512 	case BNX2X_VLAN_MAC_DEL:
1513 		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1514 	case BNX2X_VLAN_MAC_MOVE:
1515 		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1516 	default:
1517 		return -EINVAL;
1518 	}
1519 }
1520 
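/* exe_q_remove() callback: undo the credit accounting of a command that is
 * removed from the execution queue without being executed.
 */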
1521 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1522 				  union bnx2x_qable_obj *qo,
1523 				  struct bnx2x_exeq_elem *elem)
1524 {
1525 	int rc = 0;
1526 
1527 	/* If consumption wasn't required, nothing to do */
1528 	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1529 		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
1530 		return 0;
1531 
1532 	switch (elem->cmd_data.vlan_mac.cmd) {
1533 	case BNX2X_VLAN_MAC_ADD:
1534 	case BNX2X_VLAN_MAC_MOVE:
1535 		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1536 		break;
1537 	case BNX2X_VLAN_MAC_DEL:
1538 		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1539 		break;
1540 	default:
1541 		return -EINVAL;
1542 	}
1543 
1544 	if (rc != true)
1545 		return -EINVAL;
1546 
1547 	return 0;
1548 }
1549 
1550 /**
1551  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1552  *
1553  * @bp:		device handle
1554  * @o:		bnx2x_vlan_mac_obj
1555  *
1556  */
1557 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1558 			       struct bnx2x_vlan_mac_obj *o)
1559 {
1560 	int cnt = 5000, rc;
1561 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1562 	struct bnx2x_raw_obj *raw = &o->raw;
1563 
1564 	while (cnt--) {
1565 		/* Wait for the current command to complete */
1566 		rc = raw->wait_comp(bp, raw);
1567 		if (rc)
1568 			return rc;
1569 
1570 		/* Wait until there are no pending commands */
1571 		if (!bnx2x_exe_queue_empty(exeq))
1572 			usleep_range(1000, 2000);
1573 		else
1574 			return 0;
1575 	}
1576 
1577 	return -EBUSY;
1578 }
1579 
1580 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1581 					 struct bnx2x_vlan_mac_obj *o,
1582 					 unsigned long *ramrod_flags)
1583 {
1584 	int rc = 0;
1585 
1586 	spin_lock_bh(&o->exe_queue.lock);
1587 
1588 	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1589 	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1590 
1591 	if (rc != 0) {
1592 		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1593 
		/* The calling function should not differentiate between this
		 * case and the case in which there is already a pending ramrod.
1596 		 */
1597 		rc = 1;
1598 	} else {
1599 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1600 	}
1601 	spin_unlock_bh(&o->exe_queue.lock);
1602 
1603 	return rc;
1604 }
1605 
1606 /**
1607  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1608  *
1609  * @bp:		device handle
1610  * @o:		bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, schedule the next execution chunk
1613  *
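 * Return: a positive value if more commands are still pending, 0 when the
 * object is idle, a negative value on failure.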
1614  */
1615 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1616 				   struct bnx2x_vlan_mac_obj *o,
1617 				   union event_ring_elem *cqe,
1618 				   unsigned long *ramrod_flags)
1619 {
1620 	struct bnx2x_raw_obj *r = &o->raw;
1621 	int rc;
1622 
1623 	/* Clearing the pending list & raw state should be made
1624 	 * atomically (as execution flow assumes they represent the same).
1625 	 */
1626 	spin_lock_bh(&o->exe_queue.lock);
1627 
1628 	/* Reset pending list */
1629 	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1630 
1631 	/* Clear pending */
1632 	r->clear_pending(r);
1633 
1634 	spin_unlock_bh(&o->exe_queue.lock);
1635 
1636 	/* If ramrod failed this is most likely a SW bug */
1637 	if (cqe->message.error)
1638 		return -EINVAL;
1639 
1640 	/* Run the next bulk of pending commands if requested */
1641 	if (test_bit(RAMROD_CONT, ramrod_flags)) {
1642 		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1643 
1644 		if (rc < 0)
1645 			return rc;
1646 	}
1647 
1648 	/* If there is more work to do return PENDING */
1649 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1650 		return 1;
1651 
1652 	return 0;
1653 }
1654 
1655 /**
1656  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1657  *
1658  * @bp:		device handle
 * @qo:		bnx2x_qable_obj
1660  * @elem:	bnx2x_exeq_elem
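 *
 * If the execution queue already holds the opposite command (ADD vs. DEL)
 * for the same classification data, the two cancel each other out: the
 * queued element is removed and freed, and a positive value is returned so
 * the caller drops the new command as well.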
1661  */
1662 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1663 				   union bnx2x_qable_obj *qo,
1664 				   struct bnx2x_exeq_elem *elem)
1665 {
1666 	struct bnx2x_exeq_elem query, *pos;
1667 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1668 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1669 
1670 	memcpy(&query, elem, sizeof(query));
1671 
1672 	switch (elem->cmd_data.vlan_mac.cmd) {
1673 	case BNX2X_VLAN_MAC_ADD:
1674 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1675 		break;
1676 	case BNX2X_VLAN_MAC_DEL:
1677 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1678 		break;
1679 	default:
1680 		/* Don't handle anything other than ADD or DEL */
1681 		return 0;
1682 	}
1683 
1684 	/* If we found the appropriate element - delete it */
1685 	pos = exeq->get(exeq, &query);
1686 	if (pos) {
1687 
1688 		/* Return the credit of the optimized command */
1689 		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1690 			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1691 			if ((query.cmd_data.vlan_mac.cmd ==
1692 			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1693 				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1694 				return -EINVAL;
1695 			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1696 				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1697 				return -EINVAL;
1698 			}
1699 		}
1700 
1701 		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1702 			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1703 			   "ADD" : "DEL");
1704 
1705 		list_del(&pos->link);
1706 		bnx2x_exe_queue_free_elem(bp, pos);
1707 		return 1;
1708 	}
1709 
1710 	return 0;
1711 }
1712 
1713 /**
1714  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1715  *
 * @bp:		device handle
 * @o:		vlan_mac object the command operates on
 * @elem:	execution queue element holding the command
 * @restore:	true when restoring an already registered element
 * @re:		returned pointer to the registry element
 *
 * Prepare a registry element according to the current command request.
1723  */
1724 static inline int bnx2x_vlan_mac_get_registry_elem(
1725 	struct bnx2x *bp,
1726 	struct bnx2x_vlan_mac_obj *o,
1727 	struct bnx2x_exeq_elem *elem,
1728 	bool restore,
1729 	struct bnx2x_vlan_mac_registry_elem **re)
1730 {
1731 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1732 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1733 
1734 	/* Allocate a new registry element if needed. */
1735 	if (!restore &&
1736 	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1737 		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1738 		if (!reg_elem)
1739 			return -ENOMEM;
1740 
1741 		/* Get a new CAM offset */
1742 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1743 			/* This shall never happen, because we have checked the
1744 			 * CAM availability in the 'validate'.
1745 			 */
1746 			WARN_ON(1);
1747 			kfree(reg_elem);
1748 			return -EINVAL;
1749 		}
1750 
1751 		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1752 
1753 		/* Set a VLAN-MAC data */
1754 		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1755 			  sizeof(reg_elem->u));
1756 
1757 		/* Copy the flags (needed for DEL and RESTORE flows) */
1758 		reg_elem->vlan_mac_flags =
1759 			elem->cmd_data.vlan_mac.vlan_mac_flags;
1760 	} else /* DEL, RESTORE */
1761 		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1762 
1763 	*re = reg_elem;
1764 	return 0;
1765 }
1766 
1767 /**
1768  * bnx2x_execute_vlan_mac - execute vlan mac command
1769  *
1770  * @bp:			device handle
 * @qo:			bnx2x_qable_obj (vlan_mac object)
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags (RAMROD_XXX bits)
1774  *
1775  * go and send a ramrod!
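 *
 * Return: a positive value when a ramrod was posted and a completion is
 * pending, 0 in the DRV_CLR_ONLY case, a negative value on failure.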
1776  */
1777 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1778 				  union bnx2x_qable_obj *qo,
1779 				  struct list_head *exe_chunk,
1780 				  unsigned long *ramrod_flags)
1781 {
1782 	struct bnx2x_exeq_elem *elem;
1783 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1784 	struct bnx2x_raw_obj *r = &o->raw;
1785 	int rc, idx = 0;
1786 	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1787 	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1788 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1789 	enum bnx2x_vlan_mac_cmd cmd;
1790 
1791 	/* If DRIVER_ONLY execution is requested, cleanup a registry
1792 	 * and exit. Otherwise send a ramrod to FW.
1793 	 */
1794 	if (!drv_only) {
1795 		WARN_ON(r->check_pending(r));
1796 
1797 		/* Set pending */
1798 		r->set_pending(r);
1799 
1800 		/* Fill the ramrod data */
1801 		list_for_each_entry(elem, exe_chunk, link) {
1802 			cmd = elem->cmd_data.vlan_mac.cmd;
1803 			/* We will add to the target object in MOVE command, so
1804 			 * change the object for a CAM search.
1805 			 */
1806 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1807 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1808 			else
1809 				cam_obj = o;
1810 
1811 			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1812 							      elem, restore,
1813 							      &reg_elem);
1814 			if (rc)
1815 				goto error_exit;
1816 
1817 			WARN_ON(!reg_elem);
1818 
1819 			/* Push a new entry into the registry */
1820 			if (!restore &&
1821 			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1822 			    (cmd == BNX2X_VLAN_MAC_MOVE)))
1823 				list_add(&reg_elem->link, &cam_obj->head);
1824 
1825 			/* Configure a single command in a ramrod data buffer */
1826 			o->set_one_rule(bp, o, elem, idx,
1827 					reg_elem->cam_offset);
1828 
1829 			/* MOVE command consumes 2 entries in the ramrod data */
1830 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1831 				idx += 2;
1832 			else
1833 				idx++;
1834 		}
1835 
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside bnx2x_sp_post()).
		 */
1842 
1843 		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1844 				   U64_HI(r->rdata_mapping),
1845 				   U64_LO(r->rdata_mapping),
1846 				   ETH_CONNECTION_TYPE);
1847 		if (rc)
1848 			goto error_exit;
1849 	}
1850 
1851 	/* Now, when we are done with the ramrod - clean up the registry */
1852 	list_for_each_entry(elem, exe_chunk, link) {
1853 		cmd = elem->cmd_data.vlan_mac.cmd;
1854 		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1855 		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1856 			reg_elem = o->check_del(bp, o,
1857 						&elem->cmd_data.vlan_mac.u);
1858 
1859 			WARN_ON(!reg_elem);
1860 
1861 			o->put_cam_offset(o, reg_elem->cam_offset);
1862 			list_del(&reg_elem->link);
1863 			kfree(reg_elem);
1864 		}
1865 	}
1866 
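	/* A positive return value tells the caller that a ramrod completion
	 * is still pending; in the DRIVER_ONLY case nothing was sent to FW,
	 * so we are done right away.
	 */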
1867 	if (!drv_only)
1868 		return 1;
1869 	else
1870 		return 0;
1871 
1872 error_exit:
1873 	r->clear_pending(r);
1874 
1875 	/* Cleanup a registry in case of a failure */
1876 	list_for_each_entry(elem, exe_chunk, link) {
1877 		cmd = elem->cmd_data.vlan_mac.cmd;
1878 
1879 		if (cmd == BNX2X_VLAN_MAC_MOVE)
1880 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1881 		else
1882 			cam_obj = o;
1883 
		/* Delete all the entries newly added above */
1885 		if (!restore &&
1886 		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1887 		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1888 			reg_elem = o->check_del(bp, cam_obj,
1889 						&elem->cmd_data.vlan_mac.u);
1890 			if (reg_elem) {
1891 				list_del(&reg_elem->link);
1892 				kfree(reg_elem);
1893 			}
1894 		}
1895 	}
1896 
1897 	return rc;
1898 }
1899 
1900 static inline int bnx2x_vlan_mac_push_new_cmd(
1901 	struct bnx2x *bp,
1902 	struct bnx2x_vlan_mac_ramrod_params *p)
1903 {
1904 	struct bnx2x_exeq_elem *elem;
1905 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1906 	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1907 
1908 	/* Allocate the execution queue element */
1909 	elem = bnx2x_exe_queue_alloc_elem(bp);
1910 	if (!elem)
1911 		return -ENOMEM;
1912 
1913 	/* Set the command 'length' */
1914 	switch (p->user_req.cmd) {
1915 	case BNX2X_VLAN_MAC_MOVE:
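		/* A MOVE is effectively a DEL on the source object plus an
		 * ADD on the target one, so it occupies two entries in the
		 * ramrod data.
		 */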
1916 		elem->cmd_len = 2;
1917 		break;
1918 	default:
1919 		elem->cmd_len = 1;
1920 	}
1921 
1922 	/* Fill the object specific info */
1923 	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1924 
1925 	/* Try to add a new command to the pending list */
1926 	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1927 }
1928 
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:	  device handle
 * @p:	  command parameters (vlan_mac object, user request, ramrod flags)
 *
 * Return: zero when the last command has completed, a positive value if
 * commands are still pending, or a negative value on failure.
 */
1936 int bnx2x_config_vlan_mac(struct bnx2x *bp,
1937 			   struct bnx2x_vlan_mac_ramrod_params *p)
1938 {
1939 	int rc = 0;
1940 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1941 	unsigned long *ramrod_flags = &p->ramrod_flags;
1942 	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1943 	struct bnx2x_raw_obj *raw = &o->raw;
1944 
1945 	/*
1946 	 * Add new elements to the execution list for commands that require it.
1947 	 */
1948 	if (!cont) {
1949 		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1950 		if (rc)
1951 			return rc;
1952 	}
1953 
1954 	/* If nothing will be executed further in this iteration we want to
1955 	 * return PENDING if there are pending commands
1956 	 */
1957 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1958 		rc = 1;
1959 
1960 	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1961 		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1962 		raw->clear_pending(raw);
1963 	}
1964 
1965 	/* Execute commands if required */
1966 	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1967 	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1968 		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1969 						   &p->ramrod_flags);
1970 		if (rc < 0)
1971 			return rc;
1972 	}
1973 
	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
	 * the user wants to wait until the last command is done.
	 */
1977 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1978 		/* Wait maximum for the current exe_queue length iterations plus
1979 		 * one (for the current pending command).
1980 		 */
1981 		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1982 
1983 		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1984 		       max_iterations--) {
1985 
1986 			/* Wait for the current command to complete */
1987 			rc = raw->wait_comp(bp, raw);
1988 			if (rc)
1989 				return rc;
1990 
1991 			/* Make a next step */
1992 			rc = __bnx2x_vlan_mac_execute_step(bp,
1993 							   p->vlan_mac_obj,
1994 							   &p->ramrod_flags);
1995 			if (rc < 0)
1996 				return rc;
1997 		}
1998 
1999 		return 0;
2000 	}
2001 
2002 	return rc;
2003 }
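
/* A minimal usage sketch (illustrative only; it assumes the MAC object,
 * the address and the classification flags were set up elsewhere):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p = {0};
 *	int rc;
 *
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */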
2004 
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object to delete the elements from
 * @vlan_mac_flags:	flags specifying which elements should be deleted
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, or a
 * negative value if the current operation has failed.
 */
2018 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2019 				  struct bnx2x_vlan_mac_obj *o,
2020 				  unsigned long *vlan_mac_flags,
2021 				  unsigned long *ramrod_flags)
2022 {
2023 	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
2024 	struct bnx2x_vlan_mac_ramrod_params p;
2025 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2026 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2027 	unsigned long flags;
2028 	int read_lock;
2029 	int rc = 0;
2030 
2031 	/* Clear pending commands first */
2032 
2033 	spin_lock_bh(&exeq->lock);
2034 
2035 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
2036 		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2037 		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2038 		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2039 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
2040 			if (rc) {
2041 				BNX2X_ERR("Failed to remove command\n");
2042 				spin_unlock_bh(&exeq->lock);
2043 				return rc;
2044 			}
2045 			list_del(&exeq_pos->link);
2046 			bnx2x_exe_queue_free_elem(bp, exeq_pos);
2047 		}
2048 	}
2049 
2050 	spin_unlock_bh(&exeq->lock);
2051 
2052 	/* Prepare a command request */
2053 	memset(&p, 0, sizeof(p));
2054 	p.vlan_mac_obj = o;
2055 	p.ramrod_flags = *ramrod_flags;
2056 	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
2057 
	/* Add all but the last VLAN-MAC to the execution queue without
	 * actually executing anything.
	 */
2061 	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
2062 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
2063 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
2064 
2065 	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2066 	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
2067 	if (read_lock != 0)
2068 		return read_lock;
2069 
2070 	list_for_each_entry(pos, &o->head, link) {
2071 		flags = pos->vlan_mac_flags;
2072 		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2073 		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2074 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2075 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2076 			rc = bnx2x_config_vlan_mac(bp, &p);
2077 			if (rc < 0) {
2078 				BNX2X_ERR("Failed to add a new DEL command\n");
2079 				bnx2x_vlan_mac_h_read_unlock(bp, o);
2080 				return rc;
2081 			}
2082 		}
2083 	}
2084 
2085 	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2086 	bnx2x_vlan_mac_h_read_unlock(bp, o);
2087 
2088 	p.ramrod_flags = *ramrod_flags;
2089 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
2090 
2091 	return bnx2x_config_vlan_mac(bp, &p);
2092 }
2093 
2094 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
2095 	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
2096 	unsigned long *pstate, bnx2x_obj_type type)
2097 {
2098 	raw->func_id = func_id;
2099 	raw->cid = cid;
2100 	raw->cl_id = cl_id;
2101 	raw->rdata = rdata;
2102 	raw->rdata_mapping = rdata_mapping;
2103 	raw->state = state;
2104 	raw->pstate = pstate;
2105 	raw->obj_type = type;
2106 	raw->check_pending = bnx2x_raw_check_pending;
2107 	raw->clear_pending = bnx2x_raw_clear_pending;
2108 	raw->set_pending = bnx2x_raw_set_pending;
2109 	raw->wait_comp = bnx2x_raw_wait;
2110 }
2111 
2112 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
2113 	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
2114 	int state, unsigned long *pstate, bnx2x_obj_type type,
2115 	struct bnx2x_credit_pool_obj *macs_pool,
2116 	struct bnx2x_credit_pool_obj *vlans_pool)
2117 {
2118 	INIT_LIST_HEAD(&o->head);
2119 	o->head_reader = 0;
2120 	o->head_exe_request = false;
2121 	o->saved_ramrod_flags = 0;
2122 
2123 	o->macs_pool = macs_pool;
2124 	o->vlans_pool = vlans_pool;
2125 
2126 	o->delete_all = bnx2x_vlan_mac_del_all;
2127 	o->restore = bnx2x_vlan_mac_restore;
2128 	o->complete = bnx2x_complete_vlan_mac;
2129 	o->wait = bnx2x_wait_vlan_mac;
2130 
2131 	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2132 			   state, pstate, type);
2133 }
2134 
2135 void bnx2x_init_mac_obj(struct bnx2x *bp,
2136 			struct bnx2x_vlan_mac_obj *mac_obj,
2137 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
2138 			dma_addr_t rdata_mapping, int state,
2139 			unsigned long *pstate, bnx2x_obj_type type,
2140 			struct bnx2x_credit_pool_obj *macs_pool)
2141 {
2142 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
2143 
2144 	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2145 				   rdata_mapping, state, pstate, type,
2146 				   macs_pool, NULL);
2147 
2148 	/* CAM credit pool handling */
2149 	mac_obj->get_credit = bnx2x_get_credit_mac;
2150 	mac_obj->put_credit = bnx2x_put_credit_mac;
2151 	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2152 	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2153 
2154 	if (CHIP_IS_E1x(bp)) {
2155 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
2156 		mac_obj->check_del         = bnx2x_check_mac_del;
2157 		mac_obj->check_add         = bnx2x_check_mac_add;
2158 		mac_obj->check_move        = bnx2x_check_move_always_err;
2159 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2160 
2161 		/* Exe Queue */
2162 		bnx2x_exe_queue_init(bp,
2163 				     &mac_obj->exe_queue, 1, qable_obj,
2164 				     bnx2x_validate_vlan_mac,
2165 				     bnx2x_remove_vlan_mac,
2166 				     bnx2x_optimize_vlan_mac,
2167 				     bnx2x_execute_vlan_mac,
2168 				     bnx2x_exeq_get_mac);
2169 	} else {
2170 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
2171 		mac_obj->check_del         = bnx2x_check_mac_del;
2172 		mac_obj->check_add         = bnx2x_check_mac_add;
2173 		mac_obj->check_move        = bnx2x_check_move;
2174 		mac_obj->ramrod_cmd        =
2175 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2176 		mac_obj->get_n_elements    = bnx2x_get_n_elements;
2177 
2178 		/* Exe Queue */
2179 		bnx2x_exe_queue_init(bp,
2180 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2181 				     qable_obj, bnx2x_validate_vlan_mac,
2182 				     bnx2x_remove_vlan_mac,
2183 				     bnx2x_optimize_vlan_mac,
2184 				     bnx2x_execute_vlan_mac,
2185 				     bnx2x_exeq_get_mac);
2186 	}
2187 }
2188 
2189 void bnx2x_init_vlan_obj(struct bnx2x *bp,
2190 			 struct bnx2x_vlan_mac_obj *vlan_obj,
2191 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2192 			 dma_addr_t rdata_mapping, int state,
2193 			 unsigned long *pstate, bnx2x_obj_type type,
2194 			 struct bnx2x_credit_pool_obj *vlans_pool)
2195 {
2196 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2197 
2198 	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2199 				   rdata_mapping, state, pstate, type, NULL,
2200 				   vlans_pool);
2201 
2202 	vlan_obj->get_credit = bnx2x_get_credit_vlan;
2203 	vlan_obj->put_credit = bnx2x_put_credit_vlan;
2204 	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2205 	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2206 
2207 	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips other than E2 and newer\n");
2209 		BUG();
2210 	} else {
2211 		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2212 		vlan_obj->check_del         = bnx2x_check_vlan_del;
2213 		vlan_obj->check_add         = bnx2x_check_vlan_add;
2214 		vlan_obj->check_move        = bnx2x_check_move;
2215 		vlan_obj->ramrod_cmd        =
2216 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2217 		vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2218 
2219 		/* Exe Queue */
2220 		bnx2x_exe_queue_init(bp,
2221 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2222 				     qable_obj, bnx2x_validate_vlan_mac,
2223 				     bnx2x_remove_vlan_mac,
2224 				     bnx2x_optimize_vlan_mac,
2225 				     bnx2x_execute_vlan_mac,
2226 				     bnx2x_exeq_get_vlan);
2227 	}
2228 }
2229 
2230 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2231 			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2232 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
2233 			     dma_addr_t rdata_mapping, int state,
2234 			     unsigned long *pstate, bnx2x_obj_type type,
2235 			     struct bnx2x_credit_pool_obj *macs_pool,
2236 			     struct bnx2x_credit_pool_obj *vlans_pool)
2237 {
2238 	union bnx2x_qable_obj *qable_obj =
2239 		(union bnx2x_qable_obj *)vlan_mac_obj;
2240 
2241 	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2242 				   rdata_mapping, state, pstate, type,
2243 				   macs_pool, vlans_pool);
2244 
2245 	/* CAM pool handling */
2246 	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2247 	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2248 	/* CAM offset is relevant for 57710 and 57711 chips only which have a
2249 	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2250 	 * will be taken from MACs' pool object only.
2251 	 */
2252 	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2253 	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2254 
2255 	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("Do not support chips other than E2\n");
2257 		BUG();
2258 	} else if (CHIP_IS_E1H(bp)) {
2259 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2260 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2261 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2262 		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2263 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2264 
2265 		/* Exe Queue */
2266 		bnx2x_exe_queue_init(bp,
2267 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2268 				     bnx2x_validate_vlan_mac,
2269 				     bnx2x_remove_vlan_mac,
2270 				     bnx2x_optimize_vlan_mac,
2271 				     bnx2x_execute_vlan_mac,
2272 				     bnx2x_exeq_get_vlan_mac);
2273 	} else {
2274 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2275 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2276 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2277 		vlan_mac_obj->check_move        = bnx2x_check_move;
2278 		vlan_mac_obj->ramrod_cmd        =
2279 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2280 
2281 		/* Exe Queue */
2282 		bnx2x_exe_queue_init(bp,
2283 				     &vlan_mac_obj->exe_queue,
2284 				     CLASSIFY_RULES_COUNT,
2285 				     qable_obj, bnx2x_validate_vlan_mac,
2286 				     bnx2x_remove_vlan_mac,
2287 				     bnx2x_optimize_vlan_mac,
2288 				     bnx2x_execute_vlan_mac,
2289 				     bnx2x_exeq_get_vlan_mac);
2290 	}
2291 }
2292 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2293 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2294 			struct tstorm_eth_mac_filter_config *mac_filters,
2295 			u16 pf_id)
2296 {
2297 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2298 
2299 	u32 addr = BAR_TSTRORM_INTMEM +
2300 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2301 
2302 	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2303 }
2304 
2305 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2306 				 struct bnx2x_rx_mode_ramrod_params *p)
2307 {
2308 	/* update the bp MAC filter structure */
2309 	u32 mask = (1 << p->cl_id);
2310 
2311 	struct tstorm_eth_mac_filter_config *mac_filters =
2312 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2313 
2314 	/* initial setting is drop-all */
2315 	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2316 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2317 	u8 unmatched_unicast = 0;
2318 
	/* In e1x we only take the Rx accept flags into account since Tx
	 * switching isn't enabled.
	 */
2321 	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2322 		/* accept matched ucast */
2323 		drop_all_ucast = 0;
2324 
2325 	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2326 		/* accept matched mcast */
2327 		drop_all_mcast = 0;
2328 
2329 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
2331 		drop_all_ucast = 0;
2332 		accp_all_ucast = 1;
2333 	}
2334 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2335 		/* accept all mcast */
2336 		drop_all_mcast = 0;
2337 		accp_all_mcast = 1;
2338 	}
2339 	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2340 		/* accept (all) bcast */
2341 		accp_all_bcast = 1;
2342 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2343 		/* accept unmatched unicasts */
2344 		unmatched_unicast = 1;
2345 
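	/* Each client owns a single bit in every tstorm filter field; set or
	 * clear this client's bit (mask) according to the flags computed
	 * above.
	 */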
2346 	mac_filters->ucast_drop_all = drop_all_ucast ?
2347 		mac_filters->ucast_drop_all | mask :
2348 		mac_filters->ucast_drop_all & ~mask;
2349 
2350 	mac_filters->mcast_drop_all = drop_all_mcast ?
2351 		mac_filters->mcast_drop_all | mask :
2352 		mac_filters->mcast_drop_all & ~mask;
2353 
2354 	mac_filters->ucast_accept_all = accp_all_ucast ?
2355 		mac_filters->ucast_accept_all | mask :
2356 		mac_filters->ucast_accept_all & ~mask;
2357 
2358 	mac_filters->mcast_accept_all = accp_all_mcast ?
2359 		mac_filters->mcast_accept_all | mask :
2360 		mac_filters->mcast_accept_all & ~mask;
2361 
2362 	mac_filters->bcast_accept_all = accp_all_bcast ?
2363 		mac_filters->bcast_accept_all | mask :
2364 		mac_filters->bcast_accept_all & ~mask;
2365 
2366 	mac_filters->unmatched_unicast = unmatched_unicast ?
2367 		mac_filters->unmatched_unicast | mask :
2368 		mac_filters->unmatched_unicast & ~mask;
2369 
	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2371 			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2372 	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2373 	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2374 	   mac_filters->bcast_accept_all);
2375 
2376 	/* write the MAC filter structure*/
2377 	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2378 
2379 	/* The operation is completed */
2380 	clear_bit(p->state, p->pstate);
2381 	smp_mb__after_atomic();
2382 
2383 	return 0;
2384 }
2385 
2386 /* Setup ramrod data */
2387 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2388 				struct eth_classify_header *hdr,
2389 				u8 rule_cnt)
2390 {
2391 	hdr->echo = cpu_to_le32(cid);
2392 	hdr->rule_cnt = rule_cnt;
2393 }
2394 
2395 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2396 				unsigned long *accept_flags,
2397 				struct eth_filter_rules_cmd *cmd,
2398 				bool clear_accept_all)
2399 {
2400 	u16 state;
2401 
2402 	/* start with 'drop-all' */
2403 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2404 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2405 
2406 	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2407 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2408 
2409 	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2410 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2411 
2412 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2413 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2414 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2415 	}
2416 
2417 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2418 		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2419 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2420 	}
2421 
2422 	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2423 		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2424 
2425 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2426 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2427 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2428 	}
2429 
2430 	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2431 		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2432 
2433 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2434 	if (clear_accept_all) {
2435 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2436 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2437 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2438 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2439 	}
2440 
2441 	cmd->state = cpu_to_le16(state);
2442 }
2443 
2444 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2445 				struct bnx2x_rx_mode_ramrod_params *p)
2446 {
2447 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2448 	int rc;
2449 	u8 rule_idx = 0;
2450 
2451 	/* Reset the ramrod data buffer */
2452 	memset(data, 0, sizeof(*data));
2453 
2454 	/* Setup ramrod data */
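	/* Up to four filter rules may be built here: a Tx and an Rx rule for
	 * the regular client and, when BNX2X_RX_MODE_FCOE_ETH is set, a
	 * separate Tx/Rx pair for the FCoE L2 queue.
	 */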
2455 
2456 	/* Tx (internal switching) */
2457 	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2458 		data->rules[rule_idx].client_id = p->cl_id;
2459 		data->rules[rule_idx].func_id = p->func_id;
2460 
2461 		data->rules[rule_idx].cmd_general_data =
2462 			ETH_FILTER_RULES_CMD_TX_CMD;
2463 
2464 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2465 					       &(data->rules[rule_idx++]),
2466 					       false);
2467 	}
2468 
2469 	/* Rx */
2470 	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2471 		data->rules[rule_idx].client_id = p->cl_id;
2472 		data->rules[rule_idx].func_id = p->func_id;
2473 
2474 		data->rules[rule_idx].cmd_general_data =
2475 			ETH_FILTER_RULES_CMD_RX_CMD;
2476 
2477 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2478 					       &(data->rules[rule_idx++]),
2479 					       false);
2480 	}
2481 
	/* If FCoE Queue configuration has been requested, configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
2488 	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2489 		/*  Tx (internal switching) */
2490 		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2491 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2492 			data->rules[rule_idx].func_id = p->func_id;
2493 
2494 			data->rules[rule_idx].cmd_general_data =
2495 						ETH_FILTER_RULES_CMD_TX_CMD;
2496 
2497 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2498 						       &(data->rules[rule_idx]),
2499 						       true);
2500 			rule_idx++;
2501 		}
2502 
2503 		/* Rx */
2504 		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2505 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2506 			data->rules[rule_idx].func_id = p->func_id;
2507 
2508 			data->rules[rule_idx].cmd_general_data =
2509 						ETH_FILTER_RULES_CMD_RX_CMD;
2510 
2511 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2512 						       &(data->rules[rule_idx]),
2513 						       true);
2514 			rule_idx++;
2515 		}
2516 	}
2517 
2518 	/* Set the ramrod header (most importantly - number of rules to
2519 	 * configure).
2520 	 */
2521 	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2522 
2523 	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2524 			 data->header.rule_cnt, p->rx_accept_flags,
2525 			 p->tx_accept_flags);
2526 
2527 	/* No need for an explicit memory barrier here as long as we
2528 	 * ensure the ordering of writing to the SPQ element
2529 	 * and updating of the SPQ producer which involves a memory
2530 	 * read. If the memory read is removed we will have to put a
2531 	 * full memory barrier there (inside bnx2x_sp_post()).
2532 	 */
2533 
2534 	/* Send a ramrod */
2535 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2536 			   U64_HI(p->rdata_mapping),
2537 			   U64_LO(p->rdata_mapping),
2538 			   ETH_CONNECTION_TYPE);
2539 	if (rc)
2540 		return rc;
2541 
2542 	/* Ramrod completion is pending */
2543 	return 1;
2544 }
2545 
2546 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2547 				      struct bnx2x_rx_mode_ramrod_params *p)
2548 {
2549 	return bnx2x_state_wait(bp, p->state, p->pstate);
2550 }
2551 
2552 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2553 				    struct bnx2x_rx_mode_ramrod_params *p)
2554 {
2555 	/* Do nothing */
2556 	return 0;
2557 }
2558 
2559 int bnx2x_config_rx_mode(struct bnx2x *bp,
2560 			 struct bnx2x_rx_mode_ramrod_params *p)
2561 {
2562 	int rc;
2563 
2564 	/* Configure the new classification in the chip */
2565 	rc = p->rx_mode_obj->config_rx_mode(bp, p);
2566 	if (rc < 0)
2567 		return rc;
2568 
	/* Wait for a ramrod completion if it was requested */
2570 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2571 		rc = p->rx_mode_obj->wait_comp(bp, p);
2572 		if (rc)
2573 			return rc;
2574 	}
2575 
2576 	return rc;
2577 }
2578 
2579 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2580 			    struct bnx2x_rx_mode_obj *o)
2581 {
2582 	if (CHIP_IS_E1x(bp)) {
2583 		o->wait_comp      = bnx2x_empty_rx_mode_wait;
2584 		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2585 	} else {
2586 		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2587 		o->config_rx_mode = bnx2x_set_rx_mode_e2;
2588 	}
2589 }
2590 
2591 /********************* Multicast verbs: SET, CLEAR ****************************/
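/* The approximate-match bin of a multicast MAC is the most significant byte
 * of the CRC32C over the 6-byte address, i.e. one of 256 possible bins.
 */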
2592 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2593 {
2594 	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2595 }
2596 
2597 struct bnx2x_mcast_mac_elem {
2598 	struct list_head link;
2599 	u8 mac[ETH_ALEN];
2600 	u8 pad[2]; /* For a natural alignment of the following buffer */
2601 };
2602 
2603 struct bnx2x_mcast_bin_elem {
2604 	struct list_head link;
2605 	int bin;
2606 	int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */
2607 };
2608 
2609 struct bnx2x_pending_mcast_cmd {
2610 	struct list_head link;
2611 	int type; /* BNX2X_MCAST_CMD_X */
2612 	union {
2613 		struct list_head macs_head;
2614 		u32 macs_num; /* Needed for DEL command */
2615 		int next_bin; /* Needed for RESTORE flow with aprox match */
2616 	} data;
2617 
	bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set
			   * when macs_head has been converted to a list of
			   * bnx2x_mcast_bin_elem.
			   */

	bool done; /* set to true when the command has been handled. It is
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. Since for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
2629 };
2630 
2631 static int bnx2x_mcast_wait(struct bnx2x *bp,
2632 			    struct bnx2x_mcast_obj *o)
2633 {
2634 	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2635 			o->raw.wait_comp(bp, &o->raw))
2636 		return -EBUSY;
2637 
2638 	return 0;
2639 }
2640 
2641 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2642 				   struct bnx2x_mcast_obj *o,
2643 				   struct bnx2x_mcast_ramrod_params *p,
2644 				   enum bnx2x_mcast_cmd cmd)
2645 {
2646 	int total_sz;
2647 	struct bnx2x_pending_mcast_cmd *new_cmd;
2648 	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2649 	struct bnx2x_mcast_list_elem *pos;
2650 	int macs_list_len = 0, macs_list_len_size;
2651 
2652 	/* When adding MACs we'll need to store their values */
2653 	if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET)
2654 		macs_list_len = p->mcast_list_len;
2655 
2656 	/* If the command is empty ("handle pending commands only"), break */
2657 	if (!p->mcast_list_len)
2658 		return 0;
2659 
2660 	/* For a set command, we need to allocate sufficient memory for all
2661 	 * the bins, since we can't analyze at this point how much memory would
2662 	 * be required.
2663 	 */
2664 	macs_list_len_size = macs_list_len *
2665 			     sizeof(struct bnx2x_mcast_mac_elem);
2666 	if (cmd == BNX2X_MCAST_CMD_SET) {
2667 		int bin_size = BNX2X_MCAST_BINS_NUM *
2668 			       sizeof(struct bnx2x_mcast_bin_elem);
2669 
2670 		if (bin_size > macs_list_len_size)
2671 			macs_list_len_size = bin_size;
2672 	}
2673 	total_sz = sizeof(*new_cmd) + macs_list_len_size;
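	/* The command header and its trailing element array live in a single
	 * allocation: the MAC elements (and, for a SET command, the bin
	 * elements it is later converted to) start right after the header.
	 */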
2674 
2675 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2676 	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2677 
2678 	if (!new_cmd)
2679 		return -ENOMEM;
2680 
2681 	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2682 	   cmd, macs_list_len);
2683 
2684 	INIT_LIST_HEAD(&new_cmd->data.macs_head);
2685 
2686 	new_cmd->type = cmd;
2687 	new_cmd->done = false;
2688 
2689 	switch (cmd) {
2690 	case BNX2X_MCAST_CMD_ADD:
2691 	case BNX2X_MCAST_CMD_SET:
2692 		cur_mac = (struct bnx2x_mcast_mac_elem *)
2693 			  ((u8 *)new_cmd + sizeof(*new_cmd));
2694 
2695 		/* Push the MACs of the current command into the pending command
2696 		 * MACs list: FIFO
2697 		 */
2698 		list_for_each_entry(pos, &p->mcast_list, link) {
2699 			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2700 			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2701 			cur_mac++;
2702 		}
2703 
2704 		break;
2705 
2706 	case BNX2X_MCAST_CMD_DEL:
2707 		new_cmd->data.macs_num = p->mcast_list_len;
2708 		break;
2709 
2710 	case BNX2X_MCAST_CMD_RESTORE:
2711 		new_cmd->data.next_bin = 0;
2712 		break;
2713 
2714 	default:
2715 		kfree(new_cmd);
2716 		BNX2X_ERR("Unknown command: %d\n", cmd);
2717 		return -EINVAL;
2718 	}
2719 
2720 	/* Push the new pending command to the tail of the pending list: FIFO */
2721 	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2722 
2723 	o->set_sched(o);
2724 
2725 	return 1;
2726 }
2727 
/**
 * bnx2x_mcast_get_next_bin - get the next set bin (index)
 *
 * @o:		multicast object
 * @last:	index to start looking from (including)
 *
 * Returns the next found (set) bin or a negative value if none is found.
 */
2736 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2737 {
2738 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2739 
2740 	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2741 		if (o->registry.aprox_match.vec[i])
2742 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2743 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2744 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2745 						       vec, cur_bit)) {
2746 					return cur_bit;
2747 				}
2748 			}
2749 		inner_start = 0;
2750 	}
2751 
2752 	/* None found */
2753 	return -1;
2754 }
2755 
/**
 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
 *
 * @o:		multicast object
 *
 * Returns the index of the found bin or -1 if none is found.
 */
2763 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2764 {
2765 	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2766 
2767 	if (cur_bit >= 0)
2768 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2769 
2770 	return cur_bit;
2771 }
2772 
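/* Translate the raw object type (RX, TX or RX_TX) into the Rx/Tx command
 * flags carried by a multicast rules ramrod entry.
 */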
2773 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2774 {
2775 	struct bnx2x_raw_obj *raw = &o->raw;
2776 	u8 rx_tx_flag = 0;
2777 
2778 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2779 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2780 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2781 
2782 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2783 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2784 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2785 
2786 	return rx_tx_flag;
2787 }
2788 
2789 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2790 					struct bnx2x_mcast_obj *o, int idx,
2791 					union bnx2x_mcast_config_data *cfg_data,
2792 					enum bnx2x_mcast_cmd cmd)
2793 {
2794 	struct bnx2x_raw_obj *r = &o->raw;
2795 	struct eth_multicast_rules_ramrod_data *data =
2796 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2797 	u8 func_id = r->func_id;
2798 	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2799 	int bin;
2800 
2801 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) ||
2802 	    (cmd == BNX2X_MCAST_CMD_SET_ADD))
2803 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2804 
2805 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2806 
2807 	/* Get a bin and update a bins' vector */
2808 	switch (cmd) {
2809 	case BNX2X_MCAST_CMD_ADD:
2810 		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2811 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2812 		break;
2813 
2814 	case BNX2X_MCAST_CMD_DEL:
2815 		/* If there were no more bins to clear
2816 		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2817 		 * clear any (0xff) bin.
2818 		 * See bnx2x_mcast_validate_e2() for explanation when it may
2819 		 * happen.
2820 		 */
2821 		bin = bnx2x_mcast_clear_first_bin(o);
2822 		break;
2823 
2824 	case BNX2X_MCAST_CMD_RESTORE:
2825 		bin = cfg_data->bin;
2826 		break;
2827 
2828 	case BNX2X_MCAST_CMD_SET_ADD:
2829 		bin = cfg_data->bin;
2830 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2831 		break;
2832 
2833 	case BNX2X_MCAST_CMD_SET_DEL:
2834 		bin = cfg_data->bin;
2835 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin);
2836 		break;
2837 
2838 	default:
2839 		BNX2X_ERR("Unknown command: %d\n", cmd);
2840 		return;
2841 	}
2842 
2843 	DP(BNX2X_MSG_SP, "%s bin %d\n",
2844 			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2845 			 "Setting"  : "Clearing"), bin);
2846 
2847 	data->rules[idx].bin_id    = (u8)bin;
2848 	data->rules[idx].func_id   = func_id;
2849 	data->rules[idx].engine_id = o->engine_id;
2850 }
2851 
/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 *
 * @bp:		device handle
 * @o:		multicast object
 * @start_bin:	index in the registry to start from (including)
 * @rdata_idx:	index in the ramrod data to start from
 *
 * Returns the last handled bin index or -1 if all bins have been handled.
 */
2862 static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2864 	int *rdata_idx)
2865 {
2866 	int cur_bin, cnt = *rdata_idx;
2867 	union bnx2x_mcast_config_data cfg_data = {NULL};
2868 
2869 	/* go through the registry and configure the bins from it */
2870 	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2871 	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2872 
2873 		cfg_data.bin = (u8)cur_bin;
2874 		o->set_one_rule(bp, o, cnt, &cfg_data,
2875 				BNX2X_MCAST_CMD_RESTORE);
2876 
2877 		cnt++;
2878 
2879 		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2880 
2881 		/* Break if we reached the maximum number
2882 		 * of rules.
2883 		 */
2884 		if (cnt >= o->max_cmd_len)
2885 			break;
2886 	}
2887 
2888 	*rdata_idx = cnt;
2889 
2890 	return cur_bin;
2891 }
2892 
2893 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2894 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2895 	int *line_idx)
2896 {
2897 	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2898 	int cnt = *line_idx;
2899 	union bnx2x_mcast_config_data cfg_data = {NULL};
2900 
2901 	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2902 				 link) {
2903 
2904 		cfg_data.mac = &pmac_pos->mac[0];
2905 		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2906 
2907 		cnt++;
2908 
2909 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2910 		   pmac_pos->mac);
2911 
2912 		list_del(&pmac_pos->link);
2913 
2914 		/* Break if we reached the maximum number
2915 		 * of rules.
2916 		 */
2917 		if (cnt >= o->max_cmd_len)
2918 			break;
2919 	}
2920 
2921 	*line_idx = cnt;
2922 
2923 	/* if no more MACs to configure - we are done */
2924 	if (list_empty(&cmd_pos->data.macs_head))
2925 		cmd_pos->done = true;
2926 }
2927 
2928 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2929 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2930 	int *line_idx)
2931 {
2932 	int cnt = *line_idx;
2933 
2934 	while (cmd_pos->data.macs_num) {
2935 		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2936 
2937 		cnt++;
2938 
2939 		cmd_pos->data.macs_num--;
2940 
		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);
2943 
2944 		/* Break if we reached the maximum
2945 		 * number of rules.
2946 		 */
2947 		if (cnt >= o->max_cmd_len)
2948 			break;
2949 	}
2950 
2951 	*line_idx = cnt;
2952 
2953 	/* If we cleared all bins - we are done */
2954 	if (!cmd_pos->data.macs_num)
2955 		cmd_pos->done = true;
2956 }
2957 
2958 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2959 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2960 	int *line_idx)
2961 {
2962 	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2963 						line_idx);
2964 
2965 	if (cmd_pos->data.next_bin < 0)
2966 		/* If o->set_restore returned -1 we are done */
2967 		cmd_pos->done = true;
2968 	else
2969 		/* Start from the next bin next time */
2970 		cmd_pos->data.next_bin++;
2971 }
2972 
2973 static void
2974 bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
2975 				       struct bnx2x_mcast_obj *o,
2976 				       struct bnx2x_pending_mcast_cmd *cmd_pos)
2977 {
2978 	u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
2979 	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2980 	struct bnx2x_mcast_bin_elem *p_item;
2981 	int i, cnt = 0, mac_cnt = 0;
2982 
2983 	memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ);
2984 	memcpy(cur, o->registry.aprox_match.vec,
2985 	       sizeof(u64) * BNX2X_MCAST_VEC_SZ);
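	/* 'cur' holds the bins currently set in the registry, while 'req' is
	 * filled below with the bins required by the new SET command; only
	 * the differences between the two need to be configured.
	 */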
2986 
	/* Fill 'req' with the set of bins required by the new command */
2988 	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2989 				 link) {
2990 		int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac);
2991 
2992 		DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n",
2993 		   pmac_pos->mac);
2994 
2995 		BIT_VEC64_SET_BIT(req, bin);
2996 		list_del(&pmac_pos->link);
2997 		mac_cnt++;
2998 	}
2999 
3000 	/* We no longer have use for the MACs; Need to re-use memory for
3001 	 * a list that will be used to configure bins.
3002 	 */
3003 	cmd_pos->set_convert = true;
3004 	p_item = (struct bnx2x_mcast_bin_elem *)(cmd_pos + 1);
3005 	INIT_LIST_HEAD(&cmd_pos->data.macs_head);
3006 
3007 	for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) {
3008 		bool b_current = !!BIT_VEC64_TEST_BIT(cur, i);
3009 		bool b_required = !!BIT_VEC64_TEST_BIT(req, i);
3010 
3011 		if (b_current == b_required)
3012 			continue;
3013 
3014 		p_item->bin = i;
3015 		p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD
3016 					  : BNX2X_MCAST_CMD_SET_DEL;
		list_add_tail(&p_item->link, &cmd_pos->data.macs_head);
3018 		p_item++;
3019 		cnt++;
3020 	}
3021 
	/* We now know exactly how many bin commands are needed here.
	 * Also correct the over-estimation added during 'validate' to
	 * guarantee this command would be enqueued.
	 */
3026 	o->total_pending_num -= (o->max_cmd_len + mac_cnt);
3027 	o->total_pending_num += cnt;
3028 
3029 	DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num);
3030 }
3031 
3032 static void
3033 bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp,
3034 			       struct bnx2x_mcast_obj *o,
3035 			       struct bnx2x_pending_mcast_cmd *cmd_pos,
3036 			       int *cnt)
3037 {
3038 	union bnx2x_mcast_config_data cfg_data = {NULL};
3039 	struct bnx2x_mcast_bin_elem *p_item, *p_item_n;
3040 
	/* This is actually a 2-part scheme - it starts by converting the MACs
	 * into a list of bins to be added/removed, and correcting the numbers
	 * on the object. This is now allowed, as we're sure that all
	 * previously configured requests have already been applied.
	 * The second part is actually adding rules for the newly introduced
	 * entries [like all the rest of the hdl_pending functions].
	 */
3048 	if (!cmd_pos->set_convert)
3049 		bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos);
3050 
3051 	list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head,
3052 				 link) {
3053 		cfg_data.bin = (u8)p_item->bin;
3054 		o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type);
3055 		(*cnt)++;
3056 
3057 		list_del(&p_item->link);
3058 
3059 		/* Break if we reached the maximum number of rules. */
3060 		if (*cnt >= o->max_cmd_len)
3061 			break;
3062 	}
3063 
3064 	/* if no more MACs to configure - we are done */
3065 	if (list_empty(&cmd_pos->data.macs_head))
3066 		cmd_pos->done = true;
3067 }
3068 
3069 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
3070 				struct bnx2x_mcast_ramrod_params *p)
3071 {
3072 	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3073 	int cnt = 0;
3074 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3075 
3076 	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
3077 				 link) {
3078 		switch (cmd_pos->type) {
3079 		case BNX2X_MCAST_CMD_ADD:
3080 			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
3081 			break;
3082 
3083 		case BNX2X_MCAST_CMD_DEL:
3084 			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
3085 			break;
3086 
3087 		case BNX2X_MCAST_CMD_RESTORE:
3088 			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
3089 							   &cnt);
3090 			break;
3091 
3092 		case BNX2X_MCAST_CMD_SET:
3093 			bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
3094 			break;
3095 
3096 		default:
3097 			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3098 			return -EINVAL;
3099 		}
3100 
3101 		/* If the command has been completed - remove it from the list
3102 		 * and free the memory
3103 		 */
3104 		if (cmd_pos->done) {
3105 			list_del(&cmd_pos->link);
3106 			kfree(cmd_pos);
3107 		}
3108 
3109 		/* Break if we reached the maximum number of rules */
3110 		if (cnt >= o->max_cmd_len)
3111 			break;
3112 	}
3113 
3114 	return cnt;
3115 }
3116 
3117 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
3118 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3119 	int *line_idx)
3120 {
3121 	struct bnx2x_mcast_list_elem *mlist_pos;
3122 	union bnx2x_mcast_config_data cfg_data = {NULL};
3123 	int cnt = *line_idx;
3124 
3125 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3126 		cfg_data.mac = mlist_pos->mac;
3127 		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3128 
3129 		cnt++;
3130 
3131 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3132 		   mlist_pos->mac);
3133 	}
3134 
3135 	*line_idx = cnt;
3136 }
3137 
3138 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
3139 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3140 	int *line_idx)
3141 {
3142 	int cnt = *line_idx, i;
3143 
3144 	for (i = 0; i < p->mcast_list_len; i++) {
3145 		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3146 
3147 		cnt++;
3148 
3149 		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
3150 				 p->mcast_list_len - i - 1);
3151 	}
3152 
3153 	*line_idx = cnt;
3154 }
3155 
/**
 * bnx2x_mcast_handle_current_cmd - send the current command if there is room
 *
 * @bp:		device handle
 * @p:		mcast ramrod parameters
 * @cmd:	command to handle (ADD/DEL/RESTORE)
 * @start_cnt:	first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
3168 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
3169 			struct bnx2x_mcast_ramrod_params *p,
3170 			enum bnx2x_mcast_cmd cmd,
3171 			int start_cnt)
3172 {
3173 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3174 	int cnt = start_cnt;
3175 
3176 	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3177 
3178 	switch (cmd) {
3179 	case BNX2X_MCAST_CMD_ADD:
3180 		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3181 		break;
3182 
3183 	case BNX2X_MCAST_CMD_DEL:
3184 		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3185 		break;
3186 
3187 	case BNX2X_MCAST_CMD_RESTORE:
3188 		o->hdl_restore(bp, o, 0, &cnt);
3189 		break;
3190 
3191 	default:
3192 		BNX2X_ERR("Unknown command: %d\n", cmd);
3193 		return -EINVAL;
3194 	}
3195 
3196 	/* The current command has been handled */
3197 	p->mcast_list_len = 0;
3198 
3199 	return cnt;
3200 }
3201 
3202 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
3203 				   struct bnx2x_mcast_ramrod_params *p,
3204 				   enum bnx2x_mcast_cmd cmd)
3205 {
3206 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3207 	int reg_sz = o->get_registry_size(o);
3208 
3209 	switch (cmd) {
3210 	/* DEL command deletes all currently configured MACs */
3211 	case BNX2X_MCAST_CMD_DEL:
3212 		o->set_registry_size(o, 0);
3213 		/* Don't break */
3214 
3215 	/* RESTORE command will restore the entire multicast configuration */
3216 	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be less, as some MACs in postponed ADD
3219 		 * command(s) scheduled before this command may fall into
3220 		 * the same bin and the actual number of bins set in the
3221 		 * registry would be less than we estimated here. See
3222 		 * bnx2x_mcast_set_one_rule_e2() for further details.
3223 		 */
3224 		p->mcast_list_len = reg_sz;
3225 		break;
3226 
3227 	case BNX2X_MCAST_CMD_ADD:
3228 	case BNX2X_MCAST_CMD_CONT:
3229 		/* Here we assume that all new MACs will fall into new bins.
3230 		 * However we will correct the real registry size after we
3231 		 * handle all pending commands.
3232 		 */
3233 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
3234 		break;
3235 
3236 	case BNX2X_MCAST_CMD_SET:
3237 		/* We can only learn how many commands would actually be used
3238 		 * when this is being configured. So for now, simply guarantee
3239 		 * the command will be enqueued [to refrain from adding logic
3240 		 * that handles this and THEN learns it needs several ramrods].
3241 		 * Just like for ADD/Cont, the mcast_list_len might be an over
3242 		 * estimation; or even more so, since we don't take into
3243 		 * account the possibility of removal of existing bins.
3244 		 */
3245 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
3246 		o->total_pending_num += o->max_cmd_len;
3247 		break;
3248 
3249 	default:
3250 		BNX2X_ERR("Unknown command: %d\n", cmd);
3251 		return -EINVAL;
3252 	}
3253 
3254 	/* Increase the total number of MACs pending to be configured */
3255 	o->total_pending_num += p->mcast_list_len;
3256 
3257 	return 0;
3258 }
3259 
3260 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
3261 				      struct bnx2x_mcast_ramrod_params *p,
3262 				  int old_num_bins,
3263 				  enum bnx2x_mcast_cmd cmd)
3264 {
3265 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3266 
3267 	o->set_registry_size(o, old_num_bins);
3268 	o->total_pending_num -= p->mcast_list_len;
3269 
3270 	if (cmd == BNX2X_MCAST_CMD_SET)
3271 		o->total_pending_num -= o->max_cmd_len;
3272 }
3273 
/**
 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
 *
 * @bp:		device handle
 * @p:		mcast ramrod parameters
 * @len:	number of rules to handle
 */
3281 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
3282 					struct bnx2x_mcast_ramrod_params *p,
3283 					u8 len)
3284 {
3285 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3286 	struct eth_multicast_rules_ramrod_data *data =
3287 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3288 
3289 	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3290 					(BNX2X_FILTER_MCAST_PENDING <<
3291 					 BNX2X_SWCID_SHIFT));
3292 	data->header.rule_cnt = len;
3293 }
3294 
/**
 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 *
 * @bp:		device handle
 * @o:		multicast object
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
 *
 * Returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
 */
3306 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
3307 						  struct bnx2x_mcast_obj *o)
3308 {
3309 	int i, cnt = 0;
3310 	u64 elem;
3311 
3312 	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
3313 		elem = o->registry.aprox_match.vec[i];
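		/* Clearing the lowest set bit on every iteration makes the
		 * inner loop run exactly once per set bit in this element.
		 */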
3314 		for (; elem; cnt++)
3315 			elem &= elem - 1;
3316 	}
3317 
3318 	o->set_registry_size(o, cnt);
3319 
3320 	return 0;
3321 }
3322 
3323 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
3324 				struct bnx2x_mcast_ramrod_params *p,
3325 				enum bnx2x_mcast_cmd cmd)
3326 {
3327 	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
3328 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3329 	struct eth_multicast_rules_ramrod_data *data =
3330 		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3331 	int cnt = 0, rc;
3332 
3333 	/* Reset the ramrod data buffer */
3334 	memset(data, 0, sizeof(*data));
3335 
3336 	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
3337 
3338 	/* If there are no more pending commands - clear SCHEDULED state */
3339 	if (list_empty(&o->pending_cmds_head))
3340 		o->clear_sched(o);
3341 
3342 	/* The below may be true iff there was enough room in ramrod
3343 	 * data for all pending commands and for the current
3344 	 * command. Otherwise the current command would have been added
3345 	 * to the pending commands and p->mcast_list_len would have been
3346 	 * zeroed.
3347 	 */
3348 	if (p->mcast_list_len > 0)
3349 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3350 
3351 	/* We've pulled out some MACs - update the total number of
3352 	 * outstanding.
3353 	 */
3354 	o->total_pending_num -= cnt;
3355 
3356 	/* send a ramrod */
3357 	WARN_ON(o->total_pending_num < 0);
3358 	WARN_ON(cnt > o->max_cmd_len);
3359 
3360 	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3361 
3362 	/* Update a registry size if there are no more pending operations.
3363 	 *
3364 	 * We don't want to change the value of the registry size if there are
3365 	 * pending operations because we want it to always be equal to the
3366 	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3367 	 * set bins after the last requested operation in order to properly
3368 	 * evaluate the size of the next DEL/RESTORE operation.
3369 	 *
3370 	 * Note that we update the registry itself during command(s) handling
3371 	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3372 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3373 	 * with a limited amount of update commands (per MAC/bin) and we don't
3374 	 * know in this scope what the actual state of bins configuration is
3375 	 * going to be after this ramrod.
3376 	 */
3377 	if (!o->total_pending_num)
3378 		bnx2x_mcast_refresh_registry_e2(bp, o);
3379 
3380 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately. Due to the SET option, it's also
3382 	 * possible that after evaluating the differences there's no need for
3383 	 * a ramrod. In that case, we can skip it as well.
3384 	 */
3385 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) {
3386 		raw->clear_pending(raw);
3387 		return 0;
3388 	} else {
3389 		/* No need for an explicit memory barrier here as long as we
3390 		 * ensure the ordering of writing to the SPQ element
3391 		 * and updating of the SPQ producer which involves a memory
3392 		 * read. If the memory read is removed we will have to put a
3393 		 * full memory barrier there (inside bnx2x_sp_post()).
3394 		 */
3395 
3396 		/* Send a ramrod */
3397 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3398 				   raw->cid, U64_HI(raw->rdata_mapping),
3399 				   U64_LO(raw->rdata_mapping),
3400 				   ETH_CONNECTION_TYPE);
3401 		if (rc)
3402 			return rc;
3403 
3404 		/* Ramrod completion is pending */
3405 		return 1;
3406 	}
3407 }
3408 
3409 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3410 				    struct bnx2x_mcast_ramrod_params *p,
3411 				    enum bnx2x_mcast_cmd cmd)
3412 {
3413 	if (cmd == BNX2X_MCAST_CMD_SET) {
3414 		BNX2X_ERR("Can't use `set' command on e1h!\n");
3415 		return -EINVAL;
3416 	}
3417 
	/* Mark that there is work to do */
3419 	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3420 		p->mcast_list_len = 1;
3421 
3422 	return 0;
3423 }
3424 
3425 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3426 				       struct bnx2x_mcast_ramrod_params *p,
3427 				       int old_num_bins,
3428 				       enum bnx2x_mcast_cmd cmd)
3429 {
3430 	/* Do nothing */
3431 }
3432 
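/* Set bit 'bit' in the mc_filter array of 32-bit words:
 * word index = bit / 32, position within the word = bit % 32.
 */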
3433 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3434 do { \
3435 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3436 } while (0)
3437 
3438 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3439 					   struct bnx2x_mcast_obj *o,
3440 					   struct bnx2x_mcast_ramrod_params *p,
3441 					   u32 *mc_filter)
3442 {
3443 	struct bnx2x_mcast_list_elem *mlist_pos;
3444 	int bit;
3445 
3446 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3447 		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3448 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3449 
3450 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3451 		   mlist_pos->mac, bit);
3452 
3453 		/* bookkeeping... */
3454 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3455 				  bit);
3456 	}
3457 }
3458 
3459 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3460 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3461 	u32 *mc_filter)
3462 {
3463 	int bit;
3464 
3465 	for (bit = bnx2x_mcast_get_next_bin(o, 0);
3466 	     bit >= 0;
3467 	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3468 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3469 		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3470 	}
3471 }
3472 
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM, so we don't
 * really need any tricks to make it work.
 */
3476  */
3477 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3478 				 struct bnx2x_mcast_ramrod_params *p,
3479 				 enum bnx2x_mcast_cmd cmd)
3480 {
3481 	int i;
3482 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3483 	struct bnx2x_raw_obj *r = &o->raw;
3484 
3485 	/* If CLEAR_ONLY has been requested - clear the registry
3486 	 * and clear a pending bit.
3487 	 */
3488 	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3489 		u32 mc_filter[MC_HASH_SIZE] = {0};
3490 
3491 		/* Set the multicast filter bits before writing it into
3492 		 * the internal memory.
3493 		 */
3494 		switch (cmd) {
3495 		case BNX2X_MCAST_CMD_ADD:
3496 			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3497 			break;
3498 
3499 		case BNX2X_MCAST_CMD_DEL:
3500 			DP(BNX2X_MSG_SP,
3501 			   "Invalidating multicast MACs configuration\n");
3502 
3503 			/* clear the registry */
3504 			memset(o->registry.aprox_match.vec, 0,
3505 			       sizeof(o->registry.aprox_match.vec));
3506 			break;
3507 
3508 		case BNX2X_MCAST_CMD_RESTORE:
3509 			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3510 			break;
3511 
3512 		default:
3513 			BNX2X_ERR("Unknown command: %d\n", cmd);
3514 			return -EINVAL;
3515 		}
3516 
3517 		/* Set the mcast filter in the internal memory */
3518 		for (i = 0; i < MC_HASH_SIZE; i++)
3519 			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3520 	} else
3521 		/* clear the registry */
3522 		memset(o->registry.aprox_match.vec, 0,
3523 		       sizeof(o->registry.aprox_match.vec));
3524 
3525 	/* We are done */
3526 	r->clear_pending(r);
3527 
3528 	return 0;
3529 }
3530 
3531 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3532 				   struct bnx2x_mcast_ramrod_params *p,
3533 				   enum bnx2x_mcast_cmd cmd)
3534 {
3535 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3536 	int reg_sz = o->get_registry_size(o);
3537 
3538 	if (cmd == BNX2X_MCAST_CMD_SET) {
3539 		BNX2X_ERR("Can't use `set' command on e1!\n");
3540 		return -EINVAL;
3541 	}
3542 
3543 	switch (cmd) {
3544 	/* DEL command deletes all currently configured MACs */
3545 	case BNX2X_MCAST_CMD_DEL:
3546 		o->set_registry_size(o, 0);
		/* Fall through */
3548 
3549 	/* RESTORE command will restore the entire multicast configuration */
3550 	case BNX2X_MCAST_CMD_RESTORE:
3551 		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
		   cmd, p->mcast_list_len);
3554 		break;
3555 
3556 	case BNX2X_MCAST_CMD_ADD:
3557 	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries available
		 * for them.
		 */
3562 		if (p->mcast_list_len > o->max_cmd_len) {
3563 			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3564 				  o->max_cmd_len);
3565 			return -EINVAL;
3566 		}
		/* Every configured MAC should be cleared if a DEL command is
		 * called. Only the last ADD command is relevant since each
		 * ADD command overrides the previous configuration.
		 */
3571 		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3572 		if (p->mcast_list_len > 0)
3573 			o->set_registry_size(o, p->mcast_list_len);
3574 
3575 		break;
3576 
3577 	default:
3578 		BNX2X_ERR("Unknown command: %d\n", cmd);
3579 		return -EINVAL;
3580 	}
3581 
	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each non-empty command will consume o->max_cmd_len.
	 */
3585 	if (p->mcast_list_len)
3586 		o->total_pending_num += o->max_cmd_len;
3587 
3588 	return 0;
3589 }
3590 
3591 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
3593 				   int old_num_macs,
3594 				   enum bnx2x_mcast_cmd cmd)
3595 {
3596 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3597 
3598 	o->set_registry_size(o, old_num_macs);
3599 
	/* If the current command hasn't been handled yet and we are
	 * here, it means it's meant to be dropped and we have to
	 * update the number of outstanding MACs accordingly.
	 */
3604 	if (p->mcast_list_len)
3605 		o->total_pending_num -= o->max_cmd_len;
3606 }
3607 
3608 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3609 					struct bnx2x_mcast_obj *o, int idx,
3610 					union bnx2x_mcast_config_data *cfg_data,
3611 					enum bnx2x_mcast_cmd cmd)
3612 {
3613 	struct bnx2x_raw_obj *r = &o->raw;
3614 	struct mac_configuration_cmd *data =
3615 		(struct mac_configuration_cmd *)(r->rdata);
3616 
3617 	/* copy mac */
3618 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3619 		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3620 				      &data->config_table[idx].middle_mac_addr,
3621 				      &data->config_table[idx].lsb_mac_addr,
3622 				      cfg_data->mac);
3623 
3624 		data->config_table[idx].vlan_id = 0;
3625 		data->config_table[idx].pf_id = r->func_id;
3626 		data->config_table[idx].clients_bit_vector =
3627 			cpu_to_le32(1 << r->cl_id);
3628 
3629 		SET_FLAG(data->config_table[idx].flags,
3630 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3631 			 T_ETH_MAC_COMMAND_SET);
3632 	}
3633 }
3634 
3635 /**
 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @len:	number of rules to handle
3641  */
3642 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3643 					struct bnx2x_mcast_ramrod_params *p,
3644 					u8 len)
3645 {
3646 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3647 	struct mac_configuration_cmd *data =
3648 		(struct mac_configuration_cmd *)(r->rdata);
3649 
3650 	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3651 		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3652 		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3653 
3654 	data->hdr.offset = offset;
3655 	data->hdr.client_id = cpu_to_le16(0xff);
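	/* Encode the CID and the MCAST_PENDING state into the echo field so
	 * that the completion can later be matched back to this command.
	 */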
3656 	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3657 				     (BNX2X_FILTER_MCAST_PENDING <<
3658 				      BNX2X_SWCID_SHIFT));
3659 	data->hdr.length = len;
3660 }
3661 
3662 /**
3663  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3664  *
3665  * @bp:		device handle
 * @o:		multicast info object
 * @start_idx:	index in the registry to start from
 * @rdata_idx:	index in the ramrod data to start from
 *
 * The restore command for 57710 is like all other commands - always a
 * standalone command - so start_idx and rdata_idx will always be 0. This
 * function always succeeds.
 * Returns -1 to comply with the 57712 variant.
3674  */
3675 static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3677 	int *rdata_idx)
3678 {
3679 	struct bnx2x_mcast_mac_elem *elem;
3680 	int i = 0;
3681 	union bnx2x_mcast_config_data cfg_data = {NULL};
3682 
3683 	/* go through the registry and configure the MACs from it. */
3684 	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3685 		cfg_data.mac = &elem->mac[0];
3686 		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3687 
3688 		i++;
3689 
		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
3692 	}
3693 
3694 	*rdata_idx = i;
3695 
3696 	return -1;
3697 }
3698 
3699 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3700 	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3701 {
3702 	struct bnx2x_pending_mcast_cmd *cmd_pos;
3703 	struct bnx2x_mcast_mac_elem *pmac_pos;
3704 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3705 	union bnx2x_mcast_config_data cfg_data = {NULL};
3706 	int cnt = 0;
3707 
3708 	/* If nothing to be done - return */
3709 	if (list_empty(&o->pending_cmds_head))
3710 		return 0;
3711 
3712 	/* Handle the first command */
3713 	cmd_pos = list_first_entry(&o->pending_cmds_head,
3714 				   struct bnx2x_pending_mcast_cmd, link);
3715 
3716 	switch (cmd_pos->type) {
3717 	case BNX2X_MCAST_CMD_ADD:
3718 		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3719 			cfg_data.mac = &pmac_pos->mac[0];
3720 			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3721 
3722 			cnt++;
3723 
3724 			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3725 			   pmac_pos->mac);
3726 		}
3727 		break;
3728 
3729 	case BNX2X_MCAST_CMD_DEL:
3730 		cnt = cmd_pos->data.macs_num;
3731 		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3732 		break;
3733 
3734 	case BNX2X_MCAST_CMD_RESTORE:
3735 		o->hdl_restore(bp, o, 0, &cnt);
3736 		break;
3737 
3738 	default:
3739 		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3740 		return -EINVAL;
3741 	}
3742 
3743 	list_del(&cmd_pos->link);
3744 	kfree(cmd_pos);
3745 
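	/* Return the number of entries handled; the caller uses it to set
	 * the ramrod header length.
	 */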
3746 	return cnt;
3747 }
3748 
3749 /**
3750  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3751  *
 * @fw_hi:	MSB 16 bits of the MAC address in FW format
 * @fw_mid:	middle 16 bits of the MAC address in FW format
 * @fw_lo:	LSB 16 bits of the MAC address in FW format
 * @mac:	buffer for the reconstructed MAC address
3756  */
3757 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3758 					 __le16 *fw_lo, u8 *mac)
3759 {
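	/* The firmware keeps the address as three 16-bit words with the two
	 * bytes of each word swapped; undo that swizzling here.
	 */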
3760 	mac[1] = ((u8 *)fw_hi)[0];
3761 	mac[0] = ((u8 *)fw_hi)[1];
3762 	mac[3] = ((u8 *)fw_mid)[0];
3763 	mac[2] = ((u8 *)fw_mid)[1];
3764 	mac[5] = ((u8 *)fw_lo)[0];
3765 	mac[4] = ((u8 *)fw_lo)[1];
3766 }
3767 
3768 /**
 * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry
 *
 * @bp:		device handle
 * @o:		multicast info object
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD
 * command and update the registry correspondingly: if ADD - allocate memory
 * and add the entries to the registry (list), if DELETE - clear the registry
 * and free the memory.
3778  */
3779 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3780 						  struct bnx2x_mcast_obj *o)
3781 {
3782 	struct bnx2x_raw_obj *raw = &o->raw;
3783 	struct bnx2x_mcast_mac_elem *elem;
3784 	struct mac_configuration_cmd *data =
3785 			(struct mac_configuration_cmd *)(raw->rdata);
3786 
3787 	/* If first entry contains a SET bit - the command was ADD,
3788 	 * otherwise - DEL_ALL
3789 	 */
3790 	if (GET_FLAG(data->config_table[0].flags,
3791 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3792 		int i, len = data->hdr.length;
3793 
		/* Return if it was a RESTORE command - the registry is
		 * already up to date.
		 */
3795 		if (!list_empty(&o->registry.exact_match.macs))
3796 			return 0;
3797 
3798 		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3799 		if (!elem) {
3800 			BNX2X_ERR("Failed to allocate registry memory\n");
3801 			return -ENOMEM;
3802 		}
3803 
3804 		for (i = 0; i < len; i++, elem++) {
3805 			bnx2x_get_fw_mac_addr(
3806 				&data->config_table[i].msb_mac_addr,
3807 				&data->config_table[i].middle_mac_addr,
3808 				&data->config_table[i].lsb_mac_addr,
3809 				elem->mac);
3810 			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3811 			   elem->mac);
3812 			list_add_tail(&elem->link,
3813 				      &o->registry.exact_match.macs);
3814 		}
3815 	} else {
3816 		elem = list_first_entry(&o->registry.exact_match.macs,
3817 					struct bnx2x_mcast_mac_elem, link);
3818 		DP(BNX2X_MSG_SP, "Deleting a registry\n");
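		/* All entries were allocated as a single kcalloc() block in
		 * the ADD path above, so freeing the first list element
		 * releases the whole registry.
		 */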
3819 		kfree(elem);
3820 		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3821 	}
3822 
3823 	return 0;
3824 }
3825 
3826 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3827 				struct bnx2x_mcast_ramrod_params *p,
3828 				enum bnx2x_mcast_cmd cmd)
3829 {
3830 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3831 	struct bnx2x_raw_obj *raw = &o->raw;
3832 	struct mac_configuration_cmd *data =
3833 		(struct mac_configuration_cmd *)(raw->rdata);
3834 	int cnt = 0, i, rc;
3835 
3836 	/* Reset the ramrod data buffer */
3837 	memset(data, 0, sizeof(*data));
3838 
3839 	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len; i++)
3841 		SET_FLAG(data->config_table[i].flags,
3842 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3843 			 T_ETH_MAC_COMMAND_INVALIDATE);
3844 
3845 	/* Handle pending commands first */
3846 	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3847 
3848 	/* If there are no more pending commands - clear SCHEDULED state */
3849 	if (list_empty(&o->pending_cmds_head))
3850 		o->clear_sched(o);
3851 
3852 	/* The below may be true iff there were no pending commands */
3853 	if (!cnt)
3854 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3855 
3856 	/* For 57710 every command has o->max_cmd_len length to ensure that
3857 	 * commands are done one at a time.
3858 	 */
3859 	o->total_pending_num -= o->max_cmd_len;
3860 
3861 	/* send a ramrod */
3862 
3863 	WARN_ON(cnt > o->max_cmd_len);
3864 
3865 	/* Set ramrod header (in particular, a number of entries to update) */
3866 	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3867 
	/* Update the registry: the registry contents must always be up to
	 * date in order to be able to execute a RESTORE opcode. Here we use
	 * the fact that for 57710 we send one command at a time, hence we
	 * may take the registry update out of the command handling and do it
	 * in a simpler way here.
	 */
3874 	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3875 	if (rc)
3876 		return rc;
3877 
3878 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3879 	 * RAMROD_PENDING status immediately.
3880 	 */
3881 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3882 		raw->clear_pending(raw);
3883 		return 0;
3884 	} else {
3885 		/* No need for an explicit memory barrier here as long as we
3886 		 * ensure the ordering of writing to the SPQ element
3887 		 * and updating of the SPQ producer which involves a memory
3888 		 * read. If the memory read is removed we will have to put a
3889 		 * full memory barrier there (inside bnx2x_sp_post()).
3890 		 */
3891 
3892 		/* Send a ramrod */
3893 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3894 				   U64_HI(raw->rdata_mapping),
3895 				   U64_LO(raw->rdata_mapping),
3896 				   ETH_CONNECTION_TYPE);
3897 		if (rc)
3898 			return rc;
3899 
3900 		/* Ramrod completion is pending */
3901 		return 1;
3902 	}
3903 }
3904 
3905 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3906 {
3907 	return o->registry.exact_match.num_macs_set;
3908 }
3909 
3910 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3911 {
3912 	return o->registry.aprox_match.num_bins_set;
3913 }
3914 
3915 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3916 						int n)
3917 {
3918 	o->registry.exact_match.num_macs_set = n;
3919 }
3920 
3921 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3922 						int n)
3923 {
3924 	o->registry.aprox_match.num_bins_set = n;
3925 }
3926 
3927 int bnx2x_config_mcast(struct bnx2x *bp,
3928 		       struct bnx2x_mcast_ramrod_params *p,
3929 		       enum bnx2x_mcast_cmd cmd)
3930 {
3931 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3932 	struct bnx2x_raw_obj *r = &o->raw;
3933 	int rc = 0, old_reg_size;
3934 
	/* This is needed to recover the number of currently configured mcast
	 * MACs in case of failure.
	 */
3938 	old_reg_size = o->get_registry_size(o);
3939 
3940 	/* Do some calculations and checks */
3941 	rc = o->validate(bp, p, cmd);
3942 	if (rc)
3943 		return rc;
3944 
3945 	/* Return if there is no work to do */
3946 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3947 		return 0;
3948 
3949 	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3950 	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3951 
3952 	/* Enqueue the current command to the pending list if we can't complete
3953 	 * it in the current iteration
3954 	 */
3955 	if (r->check_pending(r) ||
3956 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3957 		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3958 		if (rc < 0)
3959 			goto error_exit1;
3960 
3961 		/* As long as the current command is in a command list we
3962 		 * don't need to handle it separately.
3963 		 */
3964 		p->mcast_list_len = 0;
3965 	}
3966 
3967 	if (!r->check_pending(r)) {
3968 
3969 		/* Set 'pending' state */
3970 		r->set_pending(r);
3971 
3972 		/* Configure the new classification in the chip */
3973 		rc = o->config_mcast(bp, p, cmd);
3974 		if (rc < 0)
3975 			goto error_exit2;
3976 
3977 		/* Wait for a ramrod completion if was requested */
3978 		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3979 			rc = o->wait_comp(bp, o);
3980 	}
3981 
3982 	return rc;
3983 
3984 error_exit2:
3985 	r->clear_pending(r);
3986 
3987 error_exit1:
3988 	o->revert(bp, p, old_reg_size, cmd);
3989 
3990 	return rc;
3991 }
3992 
3993 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3994 {
3995 	smp_mb__before_atomic();
3996 	clear_bit(o->sched_state, o->raw.pstate);
3997 	smp_mb__after_atomic();
3998 }
3999 
4000 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
4001 {
4002 	smp_mb__before_atomic();
4003 	set_bit(o->sched_state, o->raw.pstate);
4004 	smp_mb__after_atomic();
4005 }
4006 
4007 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
4008 {
4009 	return !!test_bit(o->sched_state, o->raw.pstate);
4010 }
4011 
4012 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
4013 {
4014 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
4015 }
4016 
4017 void bnx2x_init_mcast_obj(struct bnx2x *bp,
4018 			  struct bnx2x_mcast_obj *mcast_obj,
4019 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
4020 			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
4021 			  int state, unsigned long *pstate, bnx2x_obj_type type)
4022 {
4023 	memset(mcast_obj, 0, sizeof(*mcast_obj));
4024 
4025 	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
4026 			   rdata, rdata_mapping, state, pstate, type);
4027 
4028 	mcast_obj->engine_id = engine_id;
4029 
4030 	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
4031 
4032 	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
4033 	mcast_obj->check_sched = bnx2x_mcast_check_sched;
4034 	mcast_obj->set_sched = bnx2x_mcast_set_sched;
4035 	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
4036 
4037 	if (CHIP_IS_E1(bp)) {
4038 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
4039 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
4040 		mcast_obj->hdl_restore       =
4041 			bnx2x_mcast_handle_restore_cmd_e1;
4042 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
4043 
4044 		if (CHIP_REV_IS_SLOW(bp))
4045 			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
4046 		else
4047 			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
4048 
4049 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
4050 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
4051 		mcast_obj->validate          = bnx2x_mcast_validate_e1;
4052 		mcast_obj->revert            = bnx2x_mcast_revert_e1;
4053 		mcast_obj->get_registry_size =
4054 			bnx2x_mcast_get_registry_size_exact;
4055 		mcast_obj->set_registry_size =
4056 			bnx2x_mcast_set_registry_size_exact;
4057 
4058 		/* 57710 is the only chip that uses the exact match for mcast
4059 		 * at the moment.
4060 		 */
4061 		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
4062 
4063 	} else if (CHIP_IS_E1H(bp)) {
4064 		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
4065 		mcast_obj->enqueue_cmd   = NULL;
4066 		mcast_obj->hdl_restore   = NULL;
4067 		mcast_obj->check_pending = bnx2x_mcast_check_pending;
4068 
4069 		/* 57711 doesn't send a ramrod, so it has unlimited credit
4070 		 * for one command.
4071 		 */
4072 		mcast_obj->max_cmd_len       = -1;
4073 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
4074 		mcast_obj->set_one_rule      = NULL;
4075 		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
4076 		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
4077 		mcast_obj->get_registry_size =
4078 			bnx2x_mcast_get_registry_size_aprox;
4079 		mcast_obj->set_registry_size =
4080 			bnx2x_mcast_set_registry_size_aprox;
4081 	} else {
4082 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
4083 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
4084 		mcast_obj->hdl_restore       =
4085 			bnx2x_mcast_handle_restore_cmd_e2;
4086 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
4087 		/* TODO: There should be a proper HSI define for this number!!!
4088 		 */
4089 		mcast_obj->max_cmd_len       = 16;
4090 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
4091 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
4092 		mcast_obj->validate          = bnx2x_mcast_validate_e2;
4093 		mcast_obj->revert            = bnx2x_mcast_revert_e2;
4094 		mcast_obj->get_registry_size =
4095 			bnx2x_mcast_get_registry_size_aprox;
4096 		mcast_obj->set_registry_size =
4097 			bnx2x_mcast_set_registry_size_aprox;
4098 	}
4099 }
4100 
4101 /*************************** Credit handling **********************************/
4102 
4103 /**
 * __atomic_add_ifless - add if the result is less than a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to add to v...
 * @u:	...if (v + a) is less than u.
 *
 * Returns true if (v + a) was less than u, and false otherwise.
4112  */
4113 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
4114 {
4115 	int c, old;
4116 
4117 	c = atomic_read(v);
4118 	for (;;) {
4119 		if (unlikely(c + a >= u))
4120 			return false;
4121 
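		/* Try to commit the new value; if another context changed
		 * the counter in the meantime, re-read it and retry.
		 */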
4122 		old = atomic_cmpxchg((v), c, c + a);
4123 		if (likely(old == c))
4124 			break;
4125 		c = old;
4126 	}
4127 
4128 	return true;
4129 }
4130 
4131 /**
 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to dec from v...
 * @u:	...if (v - a) is greater than or equal to u.
 *
 * Returns true if (v - a) was greater than or equal to u, and false
 * otherwise.
4140  */
4141 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
4142 {
4143 	int c, old;
4144 
4145 	c = atomic_read(v);
4146 	for (;;) {
4147 		if (unlikely(c - a < u))
4148 			return false;
4149 
4150 		old = atomic_cmpxchg((v), c, c - a);
4151 		if (likely(old == c))
4152 			break;
4153 		c = old;
4154 	}
4155 
4156 	return true;
4157 }
4158 
4159 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
4160 {
4161 	bool rc;
4162 
4163 	smp_mb();
4164 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4165 	smp_mb();
4166 
4167 	return rc;
4168 }
4169 
4170 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
4171 {
4172 	bool rc;
4173 
4174 	smp_mb();
4175 
	/* Don't allow a refill if credit + cnt > pool_sz */
4177 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4178 
4179 	smp_mb();
4180 
4181 	return rc;
4182 }
4183 
4184 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
4185 {
4186 	int cur_credit;
4187 
4188 	smp_mb();
4189 	cur_credit = atomic_read(&o->credit);
4190 
4191 	return cur_credit;
4192 }
4193 
4194 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
4195 					  int cnt)
4196 {
4197 	return true;
4198 }
4199 
4200 static bool bnx2x_credit_pool_get_entry(
4201 	struct bnx2x_credit_pool_obj *o,
4202 	int *offset)
4203 {
4204 	int idx, vec, i;
4205 
4206 	*offset = -1;
4207 
4208 	/* Find "internal cam-offset" then add to base for this object... */
4209 	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
4210 
4211 		/* Skip the current vector if there are no free entries in it */
4212 		if (!o->pool_mirror[vec])
4213 			continue;
4214 
4215 		/* If we've got here we are going to find a free entry */
4216 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4217 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
4218 
4219 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4220 				/* Got one!! */
4221 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4222 				*offset = o->base_pool_offset + idx;
4223 				return true;
4224 			}
4225 	}
4226 
4227 	return false;
4228 }
4229 
4230 static bool bnx2x_credit_pool_put_entry(
4231 	struct bnx2x_credit_pool_obj *o,
4232 	int offset)
4233 {
4234 	if (offset < o->base_pool_offset)
4235 		return false;
4236 
4237 	offset -= o->base_pool_offset;
4238 
4239 	if (offset >= o->pool_sz)
4240 		return false;
4241 
4242 	/* Return the entry to the pool */
4243 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4244 
4245 	return true;
4246 }
4247 
4248 static bool bnx2x_credit_pool_put_entry_always_true(
4249 	struct bnx2x_credit_pool_obj *o,
4250 	int offset)
4251 {
4252 	return true;
4253 }
4254 
4255 static bool bnx2x_credit_pool_get_entry_always_true(
4256 	struct bnx2x_credit_pool_obj *o,
4257 	int *offset)
4258 {
4259 	*offset = -1;
4260 	return true;
4261 }

/**
4263  * bnx2x_init_credit_pool - initialize credit pool internals.
4264  *
 * @p:		credit pool object
4266  * @base:	Base entry in the CAM to use.
4267  * @credit:	pool size.
4268  *
4269  * If base is negative no CAM entries handling will be performed.
4270  * If credit is negative pool operations will always succeed (unlimited pool).
4271  *
4272  */
4273 void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
4274 			    int base, int credit)
4275 {
4276 	/* Zero the object first */
4277 	memset(p, 0, sizeof(*p));
4278 
4279 	/* Set the table to all 1s */
4280 	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4281 
4282 	/* Init a pool as full */
4283 	atomic_set(&p->credit, credit);
4284 
	/* The total pool size */
4286 	p->pool_sz = credit;
4287 
4288 	p->base_pool_offset = base;
4289 
4290 	/* Commit the change */
4291 	smp_mb();
4292 
4293 	p->check = bnx2x_credit_pool_check;
4294 
4295 	/* if pool credit is negative - disable the checks */
4296 	if (credit >= 0) {
4297 		p->put      = bnx2x_credit_pool_put;
4298 		p->get      = bnx2x_credit_pool_get;
4299 		p->put_entry = bnx2x_credit_pool_put_entry;
4300 		p->get_entry = bnx2x_credit_pool_get_entry;
4301 	} else {
4302 		p->put      = bnx2x_credit_pool_always_true;
4303 		p->get      = bnx2x_credit_pool_always_true;
4304 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4305 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4306 	}
4307 
4308 	/* If base is negative - disable entries handling */
4309 	if (base < 0) {
4310 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4311 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4312 	}
4313 }
4314 
4315 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4316 				struct bnx2x_credit_pool_obj *p, u8 func_id,
4317 				u8 func_num)
4318 {
4319 /* TODO: this will be defined in consts as well... */
4320 #define BNX2X_CAM_SIZE_EMUL 5
4321 
4322 	int cam_sz;
4323 
4324 	if (CHIP_IS_E1(bp)) {
		/* In E1, multicast MACs are also stored in the CAM, so
		 * reserve room for them...
		 */
4326 		if (!CHIP_REV_IS_SLOW(bp))
4327 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
4328 		else
4329 			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
4330 
4331 		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4332 
4333 	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
		 */
		if (func_num > 0) {
4338 			if (!CHIP_REV_IS_SLOW(bp))
4339 				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4340 			else
4341 				cam_sz = BNX2X_CAM_SIZE_EMUL;
4342 			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4343 		} else {
4344 			/* this should never happen! Block MAC operations. */
4345 			bnx2x_init_credit_pool(p, 0, 0);
4346 		}
4347 
4348 	} else {
4349 
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
4353 		if (func_num > 0) {
4354 			if (!CHIP_REV_IS_SLOW(bp))
4355 				cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
4356 			else
4357 				cam_sz = BNX2X_CAM_SIZE_EMUL;
4358 
4359 			/* No need for CAM entries handling for 57712 and
4360 			 * newer.
4361 			 */
4362 			bnx2x_init_credit_pool(p, -1, cam_sz);
4363 		} else {
4364 			/* this should never happen! Block MAC operations. */
4365 			bnx2x_init_credit_pool(p, 0, 0);
4366 		}
4367 	}
4368 }
4369 
4370 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4371 				 struct bnx2x_credit_pool_obj *p,
4372 				 u8 func_id,
4373 				 u8 func_num)
4374 {
4375 	if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711; only
		 * MAC / MAC-VLAN can be set
		 */
4379 		bnx2x_init_credit_pool(p, 0, -1);
4380 	} else {
4381 		/* CAM credit is equally divided between all active functions
4382 		 * on the PATH.
4383 		 */
4384 		if (func_num > 0) {
4385 			int credit = PF_VLAN_CREDIT_E2(bp, func_num);
4386 
4387 			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
4388 		} else
4389 			/* this should never happen! Block VLAN operations. */
4390 			bnx2x_init_credit_pool(p, 0, 0);
4391 	}
4392 }
4393 
4394 /****************** RSS Configuration ******************/
4395 /**
4396  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4397  *
4398  * @bp:		driver handle
4399  * @p:		pointer to rss configuration
4400  *
4401  * Prints it when NETIF_MSG_IFUP debug level is configured.
4402  */
4403 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4404 					struct bnx2x_config_rss_params *p)
4405 {
4406 	int i;
4407 
4408 	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4409 	DP(BNX2X_MSG_SP, "0x0000: ");
4410 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4411 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4412 
4413 		/* Print 4 bytes in a line */
4414 		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4415 		    (((i + 1) & 0x3) == 0)) {
4416 			DP_CONT(BNX2X_MSG_SP, "\n");
4417 			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4418 		}
4419 	}
4420 
4421 	DP_CONT(BNX2X_MSG_SP, "\n");
4422 }
4423 
4424 /**
4425  * bnx2x_setup_rss - configure RSS
4426  *
4427  * @bp:		device handle
4428  * @p:		rss configuration
4429  *
 * Sends an RSS UPDATE ramrod.
4431  */
4432 static int bnx2x_setup_rss(struct bnx2x *bp,
4433 			   struct bnx2x_config_rss_params *p)
4434 {
4435 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4436 	struct bnx2x_raw_obj *r = &o->raw;
4437 	struct eth_rss_update_ramrod_data *data =
4438 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4439 	u16 caps = 0;
4440 	u8 rss_mode = 0;
4441 	int rc;
4442 
4443 	memset(data, 0, sizeof(*data));
4444 
4445 	DP(BNX2X_MSG_SP, "Configuring RSS\n");
4446 
4447 	/* Set an echo field */
4448 	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4449 				 (r->state << BNX2X_SWCID_SHIFT));
4450 
4451 	/* RSS mode */
4452 	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4453 		rss_mode = ETH_RSS_MODE_DISABLED;
4454 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4455 		rss_mode = ETH_RSS_MODE_REGULAR;
4456 
4457 	data->rss_mode = rss_mode;
4458 
4459 	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4460 
4461 	/* RSS capabilities */
4462 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4463 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4464 
4465 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4466 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4467 
4468 	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4469 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4470 
4471 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4472 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4473 
4474 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4475 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4476 
4477 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4478 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4479 
4480 	if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
4481 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
4482 
4483 	if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
4484 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
4485 
4486 	if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
4487 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
4488 
4489 	/* RSS keys */
4490 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4491 		u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
4492 		const u8 *src = (const u8 *)p->rss_key;
4493 		int i;
4494 
		/* Apparently, bnx2x reads this array in reverse order.
		 * We need to byte swap rss_key to comply with the Toeplitz
		 * spec.
4497 		 */
4498 		for (i = 0; i < sizeof(data->rss_key); i++)
4499 			*--dst = *src++;
4500 
4501 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4502 	}
4503 
4504 	data->capabilities = cpu_to_le16(caps);
4505 
4506 	/* Hashing mask */
4507 	data->rss_result_mask = p->rss_result_mask;
4508 
4509 	/* RSS engine ID */
4510 	data->rss_engine_id = o->engine_id;
4511 
4512 	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4513 
4514 	/* Indirection table */
4515 	memcpy(data->indirection_table, p->ind_table,
4516 		  T_ETH_INDIRECTION_TABLE_SIZE);
4517 
4518 	/* Remember the last configuration */
4519 	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4520 
4521 	/* Print the indirection table */
4522 	if (netif_msg_ifup(bp))
4523 		bnx2x_debug_print_ind_table(bp, p);
4524 
4525 	/* No need for an explicit memory barrier here as long as we
4526 	 * ensure the ordering of writing to the SPQ element
4527 	 * and updating of the SPQ producer which involves a memory
4528 	 * read. If the memory read is removed we will have to put a
4529 	 * full memory barrier there (inside bnx2x_sp_post()).
4530 	 */
4531 
4532 	/* Send a ramrod */
4533 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4534 			   U64_HI(r->rdata_mapping),
4535 			   U64_LO(r->rdata_mapping),
4536 			   ETH_CONNECTION_TYPE);
4537 
4538 	if (rc < 0)
4539 		return rc;
4540 
4541 	return 1;
4542 }
4543 
4544 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4545 			     u8 *ind_table)
4546 {
4547 	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4548 }
4549 
4550 int bnx2x_config_rss(struct bnx2x *bp,
4551 		     struct bnx2x_config_rss_params *p)
4552 {
4553 	int rc;
4554 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4555 	struct bnx2x_raw_obj *r = &o->raw;
4556 
4557 	/* Do nothing if only driver cleanup was requested */
4558 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4559 		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4560 		   p->ramrod_flags);
4561 		return 0;
4562 	}
4563 
4564 	r->set_pending(r);
4565 
4566 	rc = o->config_rss(bp, p);
4567 	if (rc < 0) {
4568 		r->clear_pending(r);
4569 		return rc;
4570 	}
4571 
4572 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4573 		rc = r->wait_comp(bp, r);
4574 
4575 	return rc;
4576 }
4577 
4578 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4579 			       struct bnx2x_rss_config_obj *rss_obj,
4580 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4581 			       void *rdata, dma_addr_t rdata_mapping,
4582 			       int state, unsigned long *pstate,
4583 			       bnx2x_obj_type type)
4584 {
4585 	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4586 			   rdata_mapping, state, pstate, type);
4587 
4588 	rss_obj->engine_id  = engine_id;
4589 	rss_obj->config_rss = bnx2x_setup_rss;
4590 }
4591 
4592 /********************** Queue state object ***********************************/
4593 
4594 /**
4595  * bnx2x_queue_state_change - perform Queue state change transition
4596  *
4597  * @bp:		device handle
4598  * @params:	parameters to perform the transition
4599  *
 * Returns 0 in case of a successfully completed transition, a negative error
 * code in case of failure, or a positive (EBUSY) value if there is a
 * completion that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
4605  */
4606 int bnx2x_queue_state_change(struct bnx2x *bp,
4607 			     struct bnx2x_queue_state_params *params)
4608 {
4609 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4610 	int rc, pending_bit;
4611 	unsigned long *pending = &o->pending;
4612 
4613 	/* Check that the requested transition is legal */
4614 	rc = o->check_transition(bp, o, params);
4615 	if (rc) {
4616 		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4617 		return -EINVAL;
4618 	}
4619 
4620 	/* Set "pending" bit */
4621 	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4622 	pending_bit = o->set_pending(o, params);
4623 	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4624 
4625 	/* Don't send a command if only driver cleanup was requested */
4626 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4627 		o->complete_cmd(bp, o, pending_bit);
4628 	else {
4629 		/* Send a ramrod */
4630 		rc = o->send_cmd(bp, params);
4631 		if (rc) {
4632 			o->next_state = BNX2X_Q_STATE_MAX;
4633 			clear_bit(pending_bit, pending);
4634 			smp_mb__after_atomic();
4635 			return rc;
4636 		}
4637 
4638 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4639 			rc = o->wait_comp(bp, o, pending_bit);
4640 			if (rc)
4641 				return rc;
4642 
4643 			return 0;
4644 		}
4645 	}
4646 
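	/* A positive return value means the ramrod completion is still
	 * pending.
	 */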
4647 	return !!test_bit(pending_bit, pending);
4648 }
4649 
4650 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4651 				   struct bnx2x_queue_state_params *params)
4652 {
4653 	enum bnx2x_queue_cmd cmd = params->cmd, bit;
4654 
4655 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4656 	 * UPDATE command.
4657 	 */
4658 	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4659 	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
4660 		bit = BNX2X_Q_CMD_UPDATE;
4661 	else
4662 		bit = cmd;
4663 
4664 	set_bit(bit, &obj->pending);
4665 	return bit;
4666 }
4667 
4668 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4669 				 struct bnx2x_queue_sp_obj *o,
4670 				 enum bnx2x_queue_cmd cmd)
4671 {
4672 	return bnx2x_state_wait(bp, cmd, &o->pending);
4673 }
4674 
4675 /**
4676  * bnx2x_queue_comp_cmd - complete the state change command.
4677  *
4678  * @bp:		device handle
 * @o:		queue state object
 * @cmd:	completed command
 *
 * Checks that the completion that arrived is the expected one.
4683  */
4684 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4685 				struct bnx2x_queue_sp_obj *o,
4686 				enum bnx2x_queue_cmd cmd)
4687 {
4688 	unsigned long cur_pending = o->pending;
4689 
4690 	if (!test_and_clear_bit(cmd, &cur_pending)) {
4691 		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4692 			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4693 			  o->state, cur_pending, o->next_state);
4694 		return -EINVAL;
4695 	}
4696 
4697 	if (o->next_tx_only >= o->max_cos)
		/* >= because the number of tx-only connections must always be
		 * smaller than max_cos since the primary connection already
		 * supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
			  o->next_tx_only, o->max_cos);
4703 
4704 	DP(BNX2X_MSG_SP,
4705 	   "Completing command %d for queue %d, setting state to %d\n",
4706 	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4707 
4708 	if (o->next_tx_only)  /* print num tx-only if any exist */
4709 		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4710 		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4711 
4712 	o->state = o->next_state;
4713 	o->num_tx_only = o->next_tx_only;
4714 	o->next_state = BNX2X_Q_STATE_MAX;
4715 
4716 	/* It's important that o->state and o->next_state are
4717 	 * updated before o->pending.
4718 	 */
4719 	wmb();
4720 
4721 	clear_bit(cmd, &o->pending);
4722 	smp_mb__after_atomic();
4723 
4724 	return 0;
4725 }
4726 
4727 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4728 				struct bnx2x_queue_state_params *cmd_params,
4729 				struct client_init_ramrod_data *data)
4730 {
4731 	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4732 
4733 	/* Rx data */
4734 
4735 	/* IPv6 TPA supported for E2 and above only */
4736 	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4737 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4738 }
4739 
4740 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4741 				struct bnx2x_queue_sp_obj *o,
4742 				struct bnx2x_general_setup_params *params,
4743 				struct client_init_general_data *gen_data,
4744 				unsigned long *flags)
4745 {
4746 	gen_data->client_id = o->cl_id;
4747 
4748 	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4749 		gen_data->statistics_counter_id =
4750 					params->stat_id;
4751 		gen_data->statistics_en_flg = 1;
4752 		gen_data->statistics_zero_flg =
4753 			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4754 	} else
4755 		gen_data->statistics_counter_id =
4756 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4757 
4758 	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4759 	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4760 	gen_data->sp_client_id = params->spcl_id;
4761 	gen_data->mtu = cpu_to_le16(params->mtu);
4762 	gen_data->func_id = o->func_id;
4763 
4764 	gen_data->cos = params->cos;
4765 
4766 	gen_data->traffic_type =
4767 		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4768 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4769 
4770 	gen_data->fp_hsi_ver = params->fp_hsi;
4771 
4772 	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4773 	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4774 }
4775 
4776 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4777 				struct bnx2x_txq_setup_params *params,
4778 				struct client_init_tx_data *tx_data,
4779 				unsigned long *flags)
4780 {
4781 	tx_data->enforce_security_flg =
4782 		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4783 	tx_data->default_vlan =
4784 		cpu_to_le16(params->default_vlan);
4785 	tx_data->default_vlan_flg =
4786 		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4787 	tx_data->tx_switching_flg =
4788 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4789 	tx_data->anti_spoofing_flg =
4790 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4791 	tx_data->force_default_pri_flg =
4792 		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4793 	tx_data->refuse_outband_vlan_flg =
4794 		test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4795 	tx_data->tunnel_lso_inc_ip_id =
4796 		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4797 	tx_data->tunnel_non_lso_pcsum_location =
4798 		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4799 							    CSUM_ON_BD;
4800 
4801 	tx_data->tx_status_block_id = params->fw_sb_id;
4802 	tx_data->tx_sb_index_number = params->sb_cq_index;
4803 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4804 
4805 	tx_data->tx_bd_page_base.lo =
4806 		cpu_to_le32(U64_LO(params->dscr_map));
4807 	tx_data->tx_bd_page_base.hi =
4808 		cpu_to_le32(U64_HI(params->dscr_map));
4809 
4810 	/* Don't configure any Tx switching mode during queue SETUP */
4811 	tx_data->state = 0;
4812 }
4813 
4814 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4815 				struct rxq_pause_params *params,
4816 				struct client_init_rx_data *rx_data)
4817 {
4818 	/* flow control data */
4819 	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4820 	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4821 	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4822 	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4823 	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4824 	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4825 	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4826 }
4827 
4828 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4829 				struct bnx2x_rxq_setup_params *params,
4830 				struct client_init_rx_data *rx_data,
4831 				unsigned long *flags)
4832 {
4833 	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4834 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4835 	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4836 				CLIENT_INIT_RX_DATA_TPA_MODE;
4837 	rx_data->vmqueue_mode_en_flg = 0;
4838 
4839 	rx_data->cache_line_alignment_log_size =
4840 		params->cache_line_log;
4841 	rx_data->enable_dynamic_hc =
4842 		test_bit(BNX2X_Q_FLG_DHC, flags);
4843 	rx_data->max_sges_for_packet = params->max_sges_pkt;
4844 	rx_data->client_qzone_id = params->cl_qzone_id;
4845 	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4846 
4847 	/* Always start in DROP_ALL mode */
4848 	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4849 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4850 
4851 	/* We don't set drop flags */
4852 	rx_data->drop_ip_cs_err_flg = 0;
4853 	rx_data->drop_tcp_cs_err_flg = 0;
4854 	rx_data->drop_ttl0_flg = 0;
4855 	rx_data->drop_udp_cs_err_flg = 0;
4856 	rx_data->inner_vlan_removal_enable_flg =
4857 		test_bit(BNX2X_Q_FLG_VLAN, flags);
4858 	rx_data->outer_vlan_removal_enable_flg =
4859 		test_bit(BNX2X_Q_FLG_OV, flags);
4860 	rx_data->status_block_id = params->fw_sb_id;
4861 	rx_data->rx_sb_index_number = params->sb_cq_index;
4862 	rx_data->max_tpa_queues = params->max_tpa_queues;
4863 	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4864 	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4865 	rx_data->bd_page_base.lo =
4866 		cpu_to_le32(U64_LO(params->dscr_map));
4867 	rx_data->bd_page_base.hi =
4868 		cpu_to_le32(U64_HI(params->dscr_map));
4869 	rx_data->sge_page_base.lo =
4870 		cpu_to_le32(U64_LO(params->sge_map));
4871 	rx_data->sge_page_base.hi =
4872 		cpu_to_le32(U64_HI(params->sge_map));
4873 	rx_data->cqe_page_base.lo =
4874 		cpu_to_le32(U64_LO(params->rcq_map));
4875 	rx_data->cqe_page_base.hi =
4876 		cpu_to_le32(U64_HI(params->rcq_map));
4877 	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4878 
4879 	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4880 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4881 		rx_data->is_approx_mcast = 1;
4882 	}
4883 
4884 	rx_data->rss_engine_id = params->rss_engine_id;
4885 
4886 	/* silent vlan removal */
4887 	rx_data->silent_vlan_removal_flg =
4888 		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4889 	rx_data->silent_vlan_value =
4890 		cpu_to_le16(params->silent_removal_value);
4891 	rx_data->silent_vlan_mask =
4892 		cpu_to_le16(params->silent_removal_mask);
4893 }
4894 
4895 /* initialize the general, tx and rx parts of a queue object */
4896 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4897 				struct bnx2x_queue_state_params *cmd_params,
4898 				struct client_init_ramrod_data *data)
4899 {
4900 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4901 				       &cmd_params->params.setup.gen_params,
4902 				       &data->general,
4903 				       &cmd_params->params.setup.flags);
4904 
4905 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4906 				  &cmd_params->params.setup.txq_params,
4907 				  &data->tx,
4908 				  &cmd_params->params.setup.flags);
4909 
4910 	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4911 				  &cmd_params->params.setup.rxq_params,
4912 				  &data->rx,
4913 				  &cmd_params->params.setup.flags);
4914 
4915 	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4916 				     &cmd_params->params.setup.pause_params,
4917 				     &data->rx);
4918 }
4919 
4920 /* initialize the general and tx parts of a tx-only queue object */
4921 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4922 				struct bnx2x_queue_state_params *cmd_params,
4923 				struct tx_queue_init_ramrod_data *data)
4924 {
4925 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4926 				       &cmd_params->params.tx_only.gen_params,
4927 				       &data->general,
4928 				       &cmd_params->params.tx_only.flags);
4929 
4930 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4931 				  &cmd_params->params.tx_only.txq_params,
4932 				  &data->tx,
4933 				  &cmd_params->params.tx_only.flags);
4934 
	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4936 			 cmd_params->q_obj->cids[0],
4937 			 data->tx.tx_bd_page_base.lo,
4938 			 data->tx.tx_bd_page_base.hi);
4939 }
4940 
4941 /**
4942  * bnx2x_q_init - init HW/FW queue
4943  *
4944  * @bp:		device handle
 * @params:	queue state parameters
4946  *
4947  * HW/FW initial Queue configuration:
4948  *      - HC: Rx and Tx
4949  *      - CDU context validation
4950  *
4951  */
4952 static inline int bnx2x_q_init(struct bnx2x *bp,
4953 			       struct bnx2x_queue_state_params *params)
4954 {
4955 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4956 	struct bnx2x_queue_init_params *init = &params->params.init;
4957 	u16 hc_usec;
4958 	u8 cos;
4959 
4960 	/* Tx HC configuration */
4961 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4962 	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
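		/* Convert the requested coalescing rate (events per second)
		 * into a period in microseconds.
		 */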
4963 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4964 
4965 		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4966 			init->tx.sb_cq_index,
4967 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4968 			hc_usec);
4969 	}
4970 
4971 	/* Rx HC configuration */
4972 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4973 	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4974 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4975 
4976 		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4977 			init->rx.sb_cq_index,
4978 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4979 			hc_usec);
4980 	}
4981 
4982 	/* Set CDU context validation values */
4983 	for (cos = 0; cos < o->max_cos; cos++) {
4984 		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4985 				 o->cids[cos], cos);
4986 		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4987 		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4988 	}
4989 
	/* As no ramrod is sent, complete the command immediately */
4991 	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4992 
4993 	mmiowb();
4994 	smp_mb();
4995 
4996 	return 0;
4997 }
4998 
4999 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
5000 					struct bnx2x_queue_state_params *params)
5001 {
5002 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5003 	struct client_init_ramrod_data *rdata =
5004 		(struct client_init_ramrod_data *)o->rdata;
5005 	dma_addr_t data_mapping = o->rdata_mapping;
5006 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5007 
5008 	/* Clear the ramrod data */
5009 	memset(rdata, 0, sizeof(*rdata));
5010 
5011 	/* Fill the ramrod data */
5012 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
5013 
5014 	/* No need for an explicit memory barrier here as long as we
5015 	 * ensure the ordering of writing to the SPQ element
5016 	 * and updating of the SPQ producer which involves a memory
5017 	 * read. If the memory read is removed we will have to put a
5018 	 * full memory barrier there (inside bnx2x_sp_post()).
5019 	 */
5020 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
5021 			     U64_HI(data_mapping),
5022 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5023 }
5024 
5025 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
5026 					struct bnx2x_queue_state_params *params)
5027 {
5028 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5029 	struct client_init_ramrod_data *rdata =
5030 		(struct client_init_ramrod_data *)o->rdata;
5031 	dma_addr_t data_mapping = o->rdata_mapping;
5032 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5033 
5034 	/* Clear the ramrod data */
5035 	memset(rdata, 0, sizeof(*rdata));
5036 
5037 	/* Fill the ramrod data */
5038 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
5039 	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
5040 
5041 	/* No need for an explicit memory barrier here as long as we
5042 	 * ensure the ordering of writing to the SPQ element
5043 	 * and updating of the SPQ producer which involves a memory
5044 	 * read. If the memory read is removed we will have to put a
5045 	 * full memory barrier there (inside bnx2x_sp_post()).
5046 	 */
5047 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
5048 			     U64_HI(data_mapping),
5049 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5050 }
5051 
5052 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
5053 				  struct bnx2x_queue_state_params *params)
5054 {
5055 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5056 	struct tx_queue_init_ramrod_data *rdata =
5057 		(struct tx_queue_init_ramrod_data *)o->rdata;
5058 	dma_addr_t data_mapping = o->rdata_mapping;
5059 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
5060 	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
5061 		&params->params.tx_only;
5062 	u8 cid_index = tx_only_params->cid_index;
5063 
5064 	if (cid_index >= o->max_cos) {
5065 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5066 			  o->cl_id, cid_index);
5067 		return -EINVAL;
5068 	}
5069 
5070 	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
5071 			 tx_only_params->gen_params.cos,
5072 			 tx_only_params->gen_params.spcl_id);
5073 
5074 	/* Clear the ramrod data */
5075 	memset(rdata, 0, sizeof(*rdata));
5076 
5077 	/* Fill the ramrod data */
5078 	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
5079 
5080 	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
5081 			 o->cids[cid_index], rdata->general.client_id,
5082 			 rdata->general.sp_client_id, rdata->general.cos);
5083 
5084 	/* No need for an explicit memory barrier here as long as we
5085 	 * ensure the ordering of writing to the SPQ element
5086 	 * and updating of the SPQ producer which involves a memory
5087 	 * read. If the memory read is removed we will have to put a
5088 	 * full memory barrier there (inside bnx2x_sp_post()).
5089 	 */
5090 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
5091 			     U64_HI(data_mapping),
5092 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5093 }
5094 
5095 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
5096 				     struct bnx2x_queue_sp_obj *obj,
5097 				     struct bnx2x_queue_update_params *params,
5098 				     struct client_update_ramrod_data *data)
5099 {
5100 	/* Client ID of the client to update */
5101 	data->client_id = obj->cl_id;
5102 
5103 	/* Function ID of the client to update */
5104 	data->func_id = obj->func_id;
5105 
5106 	/* Default VLAN value */
5107 	data->default_vlan = cpu_to_le16(params->def_vlan);
5108 
5109 	/* Inner VLAN stripping */
5110 	data->inner_vlan_removal_enable_flg =
5111 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
5112 	data->inner_vlan_removal_change_flg =
5113 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
5114 			 &params->update_flags);
5115 
5116 	/* Outer VLAN stripping */
5117 	data->outer_vlan_removal_enable_flg =
5118 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
5119 	data->outer_vlan_removal_change_flg =
5120 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
5121 			 &params->update_flags);
5122 
	/* Drop packets whose source MAC doesn't belong to this
	 * Queue.
	 */
5126 	data->anti_spoofing_enable_flg =
5127 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
5128 	data->anti_spoofing_change_flg =
5129 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
5130 
5131 	/* Activate/Deactivate */
5132 	data->activate_flg =
5133 		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
5134 	data->activate_change_flg =
5135 		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
5136 
5137 	/* Enable default VLAN */
5138 	data->default_vlan_enable_flg =
5139 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
5140 	data->default_vlan_change_flg =
5141 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
5142 			 &params->update_flags);
5143 
5144 	/* silent vlan removal */
5145 	data->silent_vlan_change_flg =
5146 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5147 			 &params->update_flags);
5148 	data->silent_vlan_removal_flg =
5149 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
5150 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
5151 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
5152 
5153 	/* tx switching */
5154 	data->tx_switching_flg =
5155 		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
5156 	data->tx_switching_change_flg =
5157 		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
5158 			 &params->update_flags);
5159 
5160 	/* PTP */
5161 	data->handle_ptp_pkts_flg =
5162 		test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
5163 	data->handle_ptp_pkts_change_flg =
5164 		test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
5165 }
5166 
5167 static inline int bnx2x_q_send_update(struct bnx2x *bp,
5168 				      struct bnx2x_queue_state_params *params)
5169 {
5170 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5171 	struct client_update_ramrod_data *rdata =
5172 		(struct client_update_ramrod_data *)o->rdata;
5173 	dma_addr_t data_mapping = o->rdata_mapping;
5174 	struct bnx2x_queue_update_params *update_params =
5175 		&params->params.update;
5176 	u8 cid_index = update_params->cid_index;
5177 
5178 	if (cid_index >= o->max_cos) {
5179 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5180 			  o->cl_id, cid_index);
5181 		return -EINVAL;
5182 	}
5183 
5184 	/* Clear the ramrod data */
5185 	memset(rdata, 0, sizeof(*rdata));
5186 
5187 	/* Fill the ramrod data */
5188 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
5189 
5190 	/* No need for an explicit memory barrier here as long as we
5191 	 * ensure the ordering of writing to the SPQ element
5192 	 * and updating of the SPQ producer which involves a memory
5193 	 * read. If the memory read is removed we will have to put a
5194 	 * full memory barrier there (inside bnx2x_sp_post()).
5195 	 */
5196 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5197 			     o->cids[cid_index], U64_HI(data_mapping),
5198 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5199 }
5200 
5201 /**
5202  * bnx2x_q_send_deactivate - send DEACTIVATE command
5203  *
5204  * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
5208  */
5209 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
5210 					struct bnx2x_queue_state_params *params)
5211 {
5212 	struct bnx2x_queue_update_params *update = &params->params.update;
5213 
5214 	memset(update, 0, sizeof(*update));
5215 
5216 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5217 
5218 	return bnx2x_q_send_update(bp, params);
5219 }
5220 
5221 /**
5222  * bnx2x_q_send_activate - send ACTIVATE command
5223  *
5224  * @bp:		device handle
5225  * @params:	queue state change parameters
5226  *
5227  * Implemented using the UPDATE command.
5228  */
5229 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
5230 					struct bnx2x_queue_state_params *params)
5231 {
5232 	struct bnx2x_queue_update_params *update = &params->params.update;
5233 
5234 	memset(update, 0, sizeof(*update));
5235 
5236 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
5237 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5238 
5239 	return bnx2x_q_send_update(bp, params);
5240 }
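
/* Illustrative caller sketch (not a fragment of the real driver flow;
 * q_obj is assumed to point at an already-initialized queue object):
 * re-activating a queue through the generic state machine would look
 * roughly like
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_ACTIVATE;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 * which ends up in bnx2x_q_send_activate() above once the transition has
 * been validated.
 */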
5241 
5242 static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
5243 				struct bnx2x_queue_sp_obj *obj,
5244 				struct bnx2x_queue_update_tpa_params *params,
5245 				struct tpa_update_ramrod_data *data)
5246 {
5247 	data->client_id = obj->cl_id;
5248 	data->complete_on_both_clients = params->complete_on_both_clients;
5249 	data->dont_verify_rings_pause_thr_flg =
5250 		params->dont_verify_thr;
5251 	data->max_agg_size = cpu_to_le16(params->max_agg_sz);
5252 	data->max_sges_for_packet = params->max_sges_pkt;
5253 	data->max_tpa_queues = params->max_tpa_queues;
5254 	data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
5255 	data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
5256 	data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
5257 	data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
5258 	data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
5259 	data->tpa_mode = params->tpa_mode;
5260 	data->update_ipv4 = params->update_ipv4;
5261 	data->update_ipv6 = params->update_ipv6;
5262 }
5263 
5264 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
5265 					struct bnx2x_queue_state_params *params)
5266 {
5267 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5268 	struct tpa_update_ramrod_data *rdata =
5269 		(struct tpa_update_ramrod_data *)o->rdata;
5270 	dma_addr_t data_mapping = o->rdata_mapping;
5271 	struct bnx2x_queue_update_tpa_params *update_tpa_params =
5272 		&params->params.update_tpa;
5273 	u16 type;
5274 
5275 	/* Clear the ramrod data */
5276 	memset(rdata, 0, sizeof(*rdata));
5277 
5278 	/* Fill the ramrod data */
5279 	bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
5280 
5281 	/* Add the function id inside the type, so that sp post function
5282 	 * doesn't automatically add the PF func-id, this is required
5283 	 * for operations done by PFs on behalf of their VFs
5284 	 */
5285 	type = ETH_CONNECTION_TYPE |
5286 		((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
5287 
5288 	/* No need for an explicit memory barrier here as long as we
5289 	 * ensure the ordering of writing to the SPQ element
5290 	 * and updating of the SPQ producer which involves a memory
5291 	 * read. If the memory read is removed we will have to put a
5292 	 * full memory barrier there (inside bnx2x_sp_post()).
5293 	 */
5294 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
5295 			     o->cids[BNX2X_PRIMARY_CID_INDEX],
5296 			     U64_HI(data_mapping),
5297 			     U64_LO(data_mapping), type);
5298 }
5299 
5300 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
5301 				    struct bnx2x_queue_state_params *params)
5302 {
5303 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5304 
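	/* HALT has no ramrod data buffer; the client id is passed directly
	 * in the SPQ data field in place of a DMA address.
	 */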
5305 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
5306 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
5307 			     ETH_CONNECTION_TYPE);
5308 }
5309 
5310 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
5311 				       struct bnx2x_queue_state_params *params)
5312 {
5313 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5314 	u8 cid_idx = params->params.cfc_del.cid_index;
5315 
5316 	if (cid_idx >= o->max_cos) {
5317 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5318 			  o->cl_id, cid_idx);
5319 		return -EINVAL;
5320 	}
5321 
5322 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
5323 			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
5324 }
5325 
5326 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
5327 					struct bnx2x_queue_state_params *params)
5328 {
5329 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5330 	u8 cid_index = params->params.terminate.cid_index;
5331 
5332 	if (cid_index >= o->max_cos) {
5333 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5334 			  o->cl_id, cid_index);
5335 		return -EINVAL;
5336 	}
5337 
5338 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
5339 			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
5340 }
5341 
5342 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
5343 				     struct bnx2x_queue_state_params *params)
5344 {
5345 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5346 
5347 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
5348 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
5349 			     ETH_CONNECTION_TYPE);
5350 }
5351 
5352 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
5353 					struct bnx2x_queue_state_params *params)
5354 {
5355 	switch (params->cmd) {
5356 	case BNX2X_Q_CMD_INIT:
5357 		return bnx2x_q_init(bp, params);
5358 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5359 		return bnx2x_q_send_setup_tx_only(bp, params);
5360 	case BNX2X_Q_CMD_DEACTIVATE:
5361 		return bnx2x_q_send_deactivate(bp, params);
5362 	case BNX2X_Q_CMD_ACTIVATE:
5363 		return bnx2x_q_send_activate(bp, params);
5364 	case BNX2X_Q_CMD_UPDATE:
5365 		return bnx2x_q_send_update(bp, params);
5366 	case BNX2X_Q_CMD_UPDATE_TPA:
5367 		return bnx2x_q_send_update_tpa(bp, params);
5368 	case BNX2X_Q_CMD_HALT:
5369 		return bnx2x_q_send_halt(bp, params);
5370 	case BNX2X_Q_CMD_CFC_DEL:
5371 		return bnx2x_q_send_cfc_del(bp, params);
5372 	case BNX2X_Q_CMD_TERMINATE:
5373 		return bnx2x_q_send_terminate(bp, params);
5374 	case BNX2X_Q_CMD_EMPTY:
5375 		return bnx2x_q_send_empty(bp, params);
5376 	default:
5377 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5378 		return -EINVAL;
5379 	}
5380 }
5381 
5382 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
5383 				    struct bnx2x_queue_state_params *params)
5384 {
5385 	switch (params->cmd) {
5386 	case BNX2X_Q_CMD_SETUP:
5387 		return bnx2x_q_send_setup_e1x(bp, params);
5388 	case BNX2X_Q_CMD_INIT:
5389 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5390 	case BNX2X_Q_CMD_DEACTIVATE:
5391 	case BNX2X_Q_CMD_ACTIVATE:
5392 	case BNX2X_Q_CMD_UPDATE:
5393 	case BNX2X_Q_CMD_UPDATE_TPA:
5394 	case BNX2X_Q_CMD_HALT:
5395 	case BNX2X_Q_CMD_CFC_DEL:
5396 	case BNX2X_Q_CMD_TERMINATE:
5397 	case BNX2X_Q_CMD_EMPTY:
5398 		return bnx2x_queue_send_cmd_cmn(bp, params);
5399 	default:
5400 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5401 		return -EINVAL;
5402 	}
5403 }
5404 
5405 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
5406 				   struct bnx2x_queue_state_params *params)
5407 {
5408 	switch (params->cmd) {
5409 	case BNX2X_Q_CMD_SETUP:
5410 		return bnx2x_q_send_setup_e2(bp, params);
5411 	case BNX2X_Q_CMD_INIT:
5412 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5413 	case BNX2X_Q_CMD_DEACTIVATE:
5414 	case BNX2X_Q_CMD_ACTIVATE:
5415 	case BNX2X_Q_CMD_UPDATE:
5416 	case BNX2X_Q_CMD_UPDATE_TPA:
5417 	case BNX2X_Q_CMD_HALT:
5418 	case BNX2X_Q_CMD_CFC_DEL:
5419 	case BNX2X_Q_CMD_TERMINATE:
5420 	case BNX2X_Q_CMD_EMPTY:
5421 		return bnx2x_queue_send_cmd_cmn(bp, params);
5422 	default:
5423 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5424 		return -EINVAL;
5425 	}
5426 }
5427 
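/* A rough, non-exhaustive sketch of the regular Queue state machine that
 * bnx2x_queue_chk_transition() below enforces (derived from the switch
 * statement in that function):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *   MCOS_TERMINATED --CFC_DEL--> ACTIVE (last tx-only) or MULTI_COS
 *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 */
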
5428 /**
5429  * bnx2x_queue_chk_transition - check state machine of a regular Queue
5430  *
5431  * @bp:		device handle
5432  * @o:		queue state object
5433  * @params:	queue state change parameters
5434  *
5435  * (This applies to regular queues, not the Forwarding queue.)
5436  * It both checks if the requested command is legal in a current
5437  * state and, if it's legal, sets a `next_state' in the object
5438  * that will be used in the completion flow to set the `state'
5439  * of the object.
5440  *
5441  * returns 0 if a requested command is a legal transition,
5442  *         -EINVAL otherwise.
5443  */
5444 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5445 				      struct bnx2x_queue_sp_obj *o,
5446 				      struct bnx2x_queue_state_params *params)
5447 {
5448 	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5449 	enum bnx2x_queue_cmd cmd = params->cmd;
5450 	struct bnx2x_queue_update_params *update_params =
5451 		 &params->params.update;
5452 	u8 next_tx_only = o->num_tx_only;
5453 
5454 	/* Forget all pending for completion commands if a driver only state
5455 	 * transition has been requested.
5456 	 */
5457 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5458 		o->pending = 0;
5459 		o->next_state = BNX2X_Q_STATE_MAX;
5460 	}
5461 
5462 	/* Don't allow a next state transition if we are in the middle of
5463 	 * the previous one.
5464 	 */
5465 	if (o->pending) {
5466 		BNX2X_ERR("Blocking transition since pending was %lx\n",
5467 			  o->pending);
5468 		return -EBUSY;
5469 	}
5470 
5471 	switch (state) {
5472 	case BNX2X_Q_STATE_RESET:
5473 		if (cmd == BNX2X_Q_CMD_INIT)
5474 			next_state = BNX2X_Q_STATE_INITIALIZED;
5475 
5476 		break;
5477 	case BNX2X_Q_STATE_INITIALIZED:
5478 		if (cmd == BNX2X_Q_CMD_SETUP) {
5479 			if (test_bit(BNX2X_Q_FLG_ACTIVE,
5480 				     &params->params.setup.flags))
5481 				next_state = BNX2X_Q_STATE_ACTIVE;
5482 			else
5483 				next_state = BNX2X_Q_STATE_INACTIVE;
5484 		}
5485 
5486 		break;
5487 	case BNX2X_Q_STATE_ACTIVE:
5488 		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5489 			next_state = BNX2X_Q_STATE_INACTIVE;
5490 
5491 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5492 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5493 			next_state = BNX2X_Q_STATE_ACTIVE;
5494 
5495 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5496 			next_state = BNX2X_Q_STATE_MULTI_COS;
5497 			next_tx_only = 1;
5498 		}
5499 
5500 		else if (cmd == BNX2X_Q_CMD_HALT)
5501 			next_state = BNX2X_Q_STATE_STOPPED;
5502 
5503 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5504 			/* If "active" state change is requested, update the
5505 			 *  state accordingly.
5506 			 */
5507 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5508 				     &update_params->update_flags) &&
5509 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5510 				      &update_params->update_flags))
5511 				next_state = BNX2X_Q_STATE_INACTIVE;
5512 			else
5513 				next_state = BNX2X_Q_STATE_ACTIVE;
5514 		}
5515 
5516 		break;
5517 	case BNX2X_Q_STATE_MULTI_COS:
5518 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5519 			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5520 
5521 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5522 			next_state = BNX2X_Q_STATE_MULTI_COS;
5523 			next_tx_only = o->num_tx_only + 1;
5524 		}
5525 
5526 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5527 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5528 			next_state = BNX2X_Q_STATE_MULTI_COS;
5529 
5530 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5531 			/* If "active" state change is requested, update the
5532 			 *  state accordingly.
5533 			 */
5534 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5535 				     &update_params->update_flags) &&
5536 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5537 				      &update_params->update_flags))
5538 				next_state = BNX2X_Q_STATE_INACTIVE;
5539 			else
5540 				next_state = BNX2X_Q_STATE_MULTI_COS;
5541 		}
5542 
5543 		break;
5544 	case BNX2X_Q_STATE_MCOS_TERMINATED:
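		/* CFC_DEL removes one tx-only connection; once the last one
		 * is gone, the primary connection returns to ACTIVE.
		 */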
5545 		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5546 			next_tx_only = o->num_tx_only - 1;
5547 			if (next_tx_only == 0)
5548 				next_state = BNX2X_Q_STATE_ACTIVE;
5549 			else
5550 				next_state = BNX2X_Q_STATE_MULTI_COS;
5551 		}
5552 
5553 		break;
5554 	case BNX2X_Q_STATE_INACTIVE:
5555 		if (cmd == BNX2X_Q_CMD_ACTIVATE)
5556 			next_state = BNX2X_Q_STATE_ACTIVE;
5557 
5558 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5559 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5560 			next_state = BNX2X_Q_STATE_INACTIVE;
5561 
5562 		else if (cmd == BNX2X_Q_CMD_HALT)
5563 			next_state = BNX2X_Q_STATE_STOPPED;
5564 
5565 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5566 			/* If "active" state change is requested, update the
5567 			 * state accordingly.
5568 			 */
5569 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5570 				     &update_params->update_flags) &&
5571 			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5572 				     &update_params->update_flags)) {
5573 				if (o->num_tx_only == 0)
5574 					next_state = BNX2X_Q_STATE_ACTIVE;
5575 				else /* tx only queues exist for this queue */
5576 					next_state = BNX2X_Q_STATE_MULTI_COS;
5577 			} else
5578 				next_state = BNX2X_Q_STATE_INACTIVE;
5579 		}
5580 
5581 		break;
5582 	case BNX2X_Q_STATE_STOPPED:
5583 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5584 			next_state = BNX2X_Q_STATE_TERMINATED;
5585 
5586 		break;
5587 	case BNX2X_Q_STATE_TERMINATED:
5588 		if (cmd == BNX2X_Q_CMD_CFC_DEL)
5589 			next_state = BNX2X_Q_STATE_RESET;
5590 
5591 		break;
5592 	default:
5593 		BNX2X_ERR("Illegal state: %d\n", state);
5594 	}
5595 
5596 	/* Transition is assured */
5597 	if (next_state != BNX2X_Q_STATE_MAX) {
5598 		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5599 				 state, cmd, next_state);
5600 		o->next_state = next_state;
5601 		o->next_tx_only = next_tx_only;
5602 		return 0;
5603 	}
5604 
5605 	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5606 
5607 	return -EINVAL;
5608 }
5609 
5610 void bnx2x_init_queue_obj(struct bnx2x *bp,
5611 			  struct bnx2x_queue_sp_obj *obj,
5612 			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5613 			  void *rdata,
5614 			  dma_addr_t rdata_mapping, unsigned long type)
5615 {
5616 	memset(obj, 0, sizeof(*obj));
5617 
5618 	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5619 	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5620 
5621 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5622 	obj->max_cos = cid_cnt;
5623 	obj->cl_id = cl_id;
5624 	obj->func_id = func_id;
5625 	obj->rdata = rdata;
5626 	obj->rdata_mapping = rdata_mapping;
5627 	obj->type = type;
5628 	obj->next_state = BNX2X_Q_STATE_MAX;
5629 
5630 	if (CHIP_IS_E1x(bp))
5631 		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5632 	else
5633 		obj->send_cmd = bnx2x_queue_send_cmd_e2;
5634 
5635 	obj->check_transition = bnx2x_queue_chk_transition;
5636 
5637 	obj->complete_cmd = bnx2x_queue_comp_cmd;
5638 	obj->wait_comp = bnx2x_queue_wait_comp;
5639 	obj->set_pending = bnx2x_queue_set_pending;
5640 }
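
/* Illustrative initialization sketch (the variable names and the number of
 * CIDs are hypothetical, not taken from the driver): a single-CoS queue
 * object could be set up roughly as
 *
 *	u32 cids[1] = { cid };
 *
 *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata, rdata_mapping, q_type);
 *
 * where q_type is a bitmask describing whether the queue has an Rx and/or
 * a Tx side.
 */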
5641 
5642 /* Return a queue object's logical state */
5643 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5644 			       struct bnx2x_queue_sp_obj *obj)
5645 {
5646 	switch (obj->state) {
5647 	case BNX2X_Q_STATE_ACTIVE:
5648 	case BNX2X_Q_STATE_MULTI_COS:
5649 		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5650 	case BNX2X_Q_STATE_RESET:
5651 	case BNX2X_Q_STATE_INITIALIZED:
5652 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5653 	case BNX2X_Q_STATE_INACTIVE:
5654 	case BNX2X_Q_STATE_STOPPED:
5655 	case BNX2X_Q_STATE_TERMINATED:
5656 	case BNX2X_Q_STATE_FLRED:
5657 		return BNX2X_Q_LOGICAL_STATE_STOPPED;
5658 	default:
5659 		return -EINVAL;
5660 	}
5661 }
5662 
5663 /********************** Function state object *********************************/
5664 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5665 					   struct bnx2x_func_sp_obj *o)
5666 {
5667 	/* in the middle of transaction - return INVALID state */
5668 	if (o->pending)
5669 		return BNX2X_F_STATE_MAX;
5670 
5671 	/* Ensure the order of reading of o->pending and o->state:
5672 	 * o->pending should be read first.
5673 	 */
5674 	rmb();
5675 
5676 	return o->state;
5677 }
5678 
5679 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5680 				struct bnx2x_func_sp_obj *o,
5681 				enum bnx2x_func_cmd cmd)
5682 {
5683 	return bnx2x_state_wait(bp, cmd, &o->pending);
5684 }
5685 
5686 /**
5687  * bnx2x_func_state_change_comp - complete the state machine transition
5688  *
5689  * @bp:		device handle
5690  * @o:		function state object
5691  * @cmd:	command that completed
5692  *
5693  * Called on state change transition. Completes the state
5694  * machine transition only - no HW interaction.
5695  */
5696 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5697 					       struct bnx2x_func_sp_obj *o,
5698 					       enum bnx2x_func_cmd cmd)
5699 {
5700 	unsigned long cur_pending = o->pending;
5701 
5702 	if (!test_and_clear_bit(cmd, &cur_pending)) {
5703 		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5704 			  cmd, BP_FUNC(bp), o->state,
5705 			  cur_pending, o->next_state);
5706 		return -EINVAL;
5707 	}
5708 
5709 	DP(BNX2X_MSG_SP,
5710 	   "Completing command %d for func %d, setting state to %d\n",
5711 	   cmd, BP_FUNC(bp), o->next_state);
5712 
5713 	o->state = o->next_state;
5714 	o->next_state = BNX2X_F_STATE_MAX;
5715 
5716 	/* It's important that o->state and o->next_state are
5717 	 * updated before o->pending.
5718 	 */
5719 	wmb();
5720 
5721 	clear_bit(cmd, &o->pending);
5722 	smp_mb__after_atomic();
5723 
5724 	return 0;
5725 }
5726 
5727 /**
5728  * bnx2x_func_comp_cmd - complete the state change command
5729  *
5730  * @bp:		device handle
5731  * @o:		function state object
5732  * @cmd:	command for which the completion arrived
5733  *
5734  * Checks that the arrived completion is expected.
5735  */
5736 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5737 			       struct bnx2x_func_sp_obj *o,
5738 			       enum bnx2x_func_cmd cmd)
5739 {
5740 	/* Complete the state machine part first and check whether
5741 	 * this is a legal completion.
5742 	 */
5743 	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5744 	return rc;
5745 }
5746 
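/* A rough sketch of the function state machine enforced by
 * bnx2x_func_chk_transition() below (AFEX_UPDATE, AFEX_VIFLISTS,
 * SWITCH_UPDATE and SET_TIMESYNC do not change the state):
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --STOP--> INITIALIZED
 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 */
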
5747 /**
5748  * bnx2x_func_chk_transition - perform function state machine transition
5749  *
5750  * @bp:		device handle
5751  * @o:		function state object
5752  * @params:	function state change parameters
5753  *
5754  * It both checks if the requested command is legal in a current
5755  * state and, if it's legal, sets a `next_state' in the object
5756  * that will be used in the completion flow to set the `state'
5757  * of the object.
5758  *
5759  * returns 0 if a requested command is a legal transition,
5760  *         -EINVAL otherwise.
5761  */
5762 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5763 				     struct bnx2x_func_sp_obj *o,
5764 				     struct bnx2x_func_state_params *params)
5765 {
5766 	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5767 	enum bnx2x_func_cmd cmd = params->cmd;
5768 
5769 	/* Forget all pending for completion commands if a driver only state
5770 	 * transition has been requested.
5771 	 */
5772 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5773 		o->pending = 0;
5774 		o->next_state = BNX2X_F_STATE_MAX;
5775 	}
5776 
5777 	/* Don't allow a next state transition if we are in the middle of
5778 	 * the previous one.
5779 	 */
5780 	if (o->pending)
5781 		return -EBUSY;
5782 
5783 	switch (state) {
5784 	case BNX2X_F_STATE_RESET:
5785 		if (cmd == BNX2X_F_CMD_HW_INIT)
5786 			next_state = BNX2X_F_STATE_INITIALIZED;
5787 
5788 		break;
5789 	case BNX2X_F_STATE_INITIALIZED:
5790 		if (cmd == BNX2X_F_CMD_START)
5791 			next_state = BNX2X_F_STATE_STARTED;
5792 
5793 		else if (cmd == BNX2X_F_CMD_HW_RESET)
5794 			next_state = BNX2X_F_STATE_RESET;
5795 
5796 		break;
5797 	case BNX2X_F_STATE_STARTED:
5798 		if (cmd == BNX2X_F_CMD_STOP)
5799 			next_state = BNX2X_F_STATE_INITIALIZED;
5800 		/* AFEX ramrods can be sent only in the STARTED state, and
5801 		 * only if a FUNCTION_STOP ramrod completion is not pending;
5802 		 * for these events the next state remains STARTED.
5803 		 */
5804 		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5805 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5806 			next_state = BNX2X_F_STATE_STARTED;
5807 
5808 		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5809 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5810 			next_state = BNX2X_F_STATE_STARTED;
5811 
5812 		/* Switch_update ramrod can be sent in either started or
5813 		 * tx_stopped state, and it doesn't change the state.
5814 		 */
5815 		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5816 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5817 			next_state = BNX2X_F_STATE_STARTED;
5818 
5819 		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
5820 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5821 			next_state = BNX2X_F_STATE_STARTED;
5822 
5823 		else if (cmd == BNX2X_F_CMD_TX_STOP)
5824 			next_state = BNX2X_F_STATE_TX_STOPPED;
5825 
5826 		break;
5827 	case BNX2X_F_STATE_TX_STOPPED:
5828 		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5829 		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5830 			next_state = BNX2X_F_STATE_TX_STOPPED;
5831 
5832 		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
5833 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5834 			next_state = BNX2X_F_STATE_TX_STOPPED;
5835 
5836 		else if (cmd == BNX2X_F_CMD_TX_START)
5837 			next_state = BNX2X_F_STATE_STARTED;
5838 
5839 		break;
5840 	default:
5841 		BNX2X_ERR("Unknown state: %d\n", state);
5842 	}
5843 
5844 	/* Transition is assured */
5845 	if (next_state != BNX2X_F_STATE_MAX) {
5846 		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5847 				 state, cmd, next_state);
5848 		o->next_state = next_state;
5849 		return 0;
5850 	}
5851 
5852 	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5853 			 state, cmd);
5854 
5855 	return -EINVAL;
5856 }
5857 
5858 /**
5859  * bnx2x_func_init_func - performs HW init at function stage
5860  *
5861  * @bp:		device handle
5862  * @drv:	driver-specific HW init callbacks
5863  *
5864  * Init HW when the current phase is
5865  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5866  * HW blocks.
5867  */
5868 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5869 				       const struct bnx2x_func_sp_drv_ops *drv)
5870 {
5871 	return drv->init_hw_func(bp);
5872 }
5873 
5874 /**
5875  * bnx2x_func_init_port - performs HW init at port stage
5876  *
5877  * @bp:		device handle
5878  * @drv:	driver-specific HW init callbacks
5879  *
5880  * Init HW when the current phase is
5881  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5882  * FUNCTION-only HW blocks.
5883  *
5884  */
5885 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5886 				       const struct bnx2x_func_sp_drv_ops *drv)
5887 {
5888 	int rc = drv->init_hw_port(bp);
5889 	if (rc)
5890 		return rc;
5891 
5892 	return bnx2x_func_init_func(bp, drv);
5893 }
5894 
5895 /**
5896  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5897  *
5898  * @bp:		device handle
5899  * @drv:	driver-specific HW init callbacks
5900  *
5901  * Init HW when the current phase is
5902  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5903  * PORT-only and FUNCTION-only HW blocks.
5904  */
5905 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5906 					const struct bnx2x_func_sp_drv_ops *drv)
5907 {
5908 	int rc = drv->init_hw_cmn_chip(bp);
5909 	if (rc)
5910 		return rc;
5911 
5912 	return bnx2x_func_init_port(bp, drv);
5913 }
5914 
5915 /**
5916  * bnx2x_func_init_cmn - performs HW init at common stage
5917  *
5918  * @bp:		device handle
5919  * @drv:	driver-specific HW init callbacks
5920  *
5921  * Init HW when the current phase is
5922  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5923  * PORT-only and FUNCTION-only HW blocks.
5924  */
5925 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5926 				      const struct bnx2x_func_sp_drv_ops *drv)
5927 {
5928 	int rc = drv->init_hw_cmn(bp);
5929 	if (rc)
5930 		return rc;
5931 
5932 	return bnx2x_func_init_port(bp, drv);
5933 }
5934 
5935 static int bnx2x_func_hw_init(struct bnx2x *bp,
5936 			      struct bnx2x_func_state_params *params)
5937 {
5938 	u32 load_code = params->params.hw_init.load_phase;
5939 	struct bnx2x_func_sp_obj *o = params->f_obj;
5940 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5941 	int rc = 0;
5942 
5943 	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5944 			 BP_ABS_FUNC(bp), load_code);
5945 
5946 	/* Prepare buffers for unzipping the FW */
5947 	rc = drv->gunzip_init(bp);
5948 	if (rc)
5949 		return rc;
5950 
5951 	/* Prepare FW */
5952 	rc = drv->init_fw(bp);
5953 	if (rc) {
5954 		BNX2X_ERR("Error loading firmware\n");
5955 		goto init_err;
5956 	}
5957 
5958 	/* Handle the beginning of COMMON_XXX phases separately... */
5959 	switch (load_code) {
5960 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5961 		rc = bnx2x_func_init_cmn_chip(bp, drv);
5962 		if (rc)
5963 			goto init_err;
5964 
5965 		break;
5966 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5967 		rc = bnx2x_func_init_cmn(bp, drv);
5968 		if (rc)
5969 			goto init_err;
5970 
5971 		break;
5972 	case FW_MSG_CODE_DRV_LOAD_PORT:
5973 		rc = bnx2x_func_init_port(bp, drv);
5974 		if (rc)
5975 			goto init_err;
5976 
5977 		break;
5978 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5979 		rc = bnx2x_func_init_func(bp, drv);
5980 		if (rc)
5981 			goto init_err;
5982 
5983 		break;
5984 	default:
5985 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5986 		rc = -EINVAL;
5987 	}
5988 
5989 init_err:
5990 	drv->gunzip_end(bp);
5991 
5992 	/* In case of success, complete the command immediately: no ramrods
5993 	 * have been sent.
5994 	 */
5995 	if (!rc)
5996 		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5997 
5998 	return rc;
5999 }
6000 
6001 /**
6002  * bnx2x_func_reset_func - reset HW at function stage
6003  *
6004  * @bp:		device handle
6005  * @drv:	driver-specific HW reset callbacks
6006  *
6007  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
6008  * FUNCTION-only HW blocks.
6009  */
6010 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
6011 					const struct bnx2x_func_sp_drv_ops *drv)
6012 {
6013 	drv->reset_hw_func(bp);
6014 }
6015 
6016 /**
6017  * bnx2x_func_reset_port - reset HW at port stage
6018  *
6019  * @bp:		device handle
6020  * @drv:	driver-specific HW reset callbacks
6021  *
6022  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
6023  * FUNCTION-only and PORT-only HW blocks.
6024  *
6025  *                 !!!IMPORTANT!!!
6026  *
6027  * It's important to call reset_port before reset_func(): the last thing
6028  * reset_func() does is pf_disable(), which disables PGLUE_B and thus
6029  * makes any further DMAE transactions impossible.
6030  */
6031 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
6032 					const struct bnx2x_func_sp_drv_ops *drv)
6033 {
6034 	drv->reset_hw_port(bp);
6035 	bnx2x_func_reset_func(bp, drv);
6036 }
6037 
6038 /**
6039  * bnx2x_func_reset_cmn - reset HW at common stage
6040  *
6041  * @bp:		device handle
6042  * @drv:	driver-specific HW reset callbacks
6043  *
6044  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
6045  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
6046  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
6047  */
6048 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
6049 					const struct bnx2x_func_sp_drv_ops *drv)
6050 {
6051 	bnx2x_func_reset_port(bp, drv);
6052 	drv->reset_hw_cmn(bp);
6053 }
6054 
6055 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
6056 				      struct bnx2x_func_state_params *params)
6057 {
6058 	u32 reset_phase = params->params.hw_reset.reset_phase;
6059 	struct bnx2x_func_sp_obj *o = params->f_obj;
6060 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
6061 
6062 	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
6063 			 reset_phase);
6064 
6065 	switch (reset_phase) {
6066 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6067 		bnx2x_func_reset_cmn(bp, drv);
6068 		break;
6069 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
6070 		bnx2x_func_reset_port(bp, drv);
6071 		break;
6072 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6073 		bnx2x_func_reset_func(bp, drv);
6074 		break;
6075 	default:
6076 		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
6077 			   reset_phase);
6078 		break;
6079 	}
6080 
6081 	/* Complete the command immediately: no ramrods have been sent. */
6082 	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
6083 
6084 	return 0;
6085 }
6086 
6087 static inline int bnx2x_func_send_start(struct bnx2x *bp,
6088 					struct bnx2x_func_state_params *params)
6089 {
6090 	struct bnx2x_func_sp_obj *o = params->f_obj;
6091 	struct function_start_data *rdata =
6092 		(struct function_start_data *)o->rdata;
6093 	dma_addr_t data_mapping = o->rdata_mapping;
6094 	struct bnx2x_func_start_params *start_params = &params->params.start;
6095 
6096 	memset(rdata, 0, sizeof(*rdata));
6097 
6098 	/* Fill the ramrod data with provided parameters */
6099 	rdata->function_mode	= (u8)start_params->mf_mode;
6100 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
6101 	rdata->path_id		= BP_PATH(bp);
6102 	rdata->network_cos_mode	= start_params->network_cos_mode;
6103 
6104 	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
6105 	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);
6106 	rdata->inner_clss_l2gre	= start_params->inner_clss_l2gre;
6107 	rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
6108 	rdata->inner_clss_vxlan	= start_params->inner_clss_vxlan;
6109 	rdata->inner_rss	= start_params->inner_rss;
6110 
6111 	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
6112 	if (start_params->class_fail_ethtype) {
6113 		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
6114 		rdata->sd_accept_mf_clss_fail_ethtype =
6115 			cpu_to_le16(start_params->class_fail_ethtype);
6116 	}
6117 
6118 	rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
6119 	rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
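	/* Use the standard 802.1Q TPID (0x8100) as the SD VLAN ethertype
	 * unless the caller provided an override.
	 */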
6120 	if (start_params->sd_vlan_eth_type)
6121 		rdata->sd_vlan_eth_type =
6122 			cpu_to_le16(start_params->sd_vlan_eth_type);
6123 	else
6124 		rdata->sd_vlan_eth_type =
6125 			cpu_to_le16(0x8100);
6126 
6127 	rdata->no_added_tags = start_params->no_added_tags;
6128 
6129 	rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
6130 	if (rdata->c2s_pri_tt_valid) {
6131 		memcpy(rdata->c2s_pri_trans_table.val,
6132 		       start_params->c2s_pri,
6133 		       MAX_VLAN_PRIORITIES);
6134 		rdata->c2s_pri_default = start_params->c2s_pri_default;
6135 	}
6136 	/* No need for an explicit memory barrier here as long as we
6137 	 * ensure the ordering of writing to the SPQ element
6138 	 * and updating of the SPQ producer which involves a memory
6139 	 * read. If the memory read is removed we will have to put a
6140 	 * full memory barrier there (inside bnx2x_sp_post()).
6141 	 */
6142 
6143 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6144 			     U64_HI(data_mapping),
6145 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6146 }
6147 
6148 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
6149 					struct bnx2x_func_state_params *params)
6150 {
6151 	struct bnx2x_func_sp_obj *o = params->f_obj;
6152 	struct function_update_data *rdata =
6153 		(struct function_update_data *)o->rdata;
6154 	dma_addr_t data_mapping = o->rdata_mapping;
6155 	struct bnx2x_func_switch_update_params *switch_update_params =
6156 		&params->params.switch_update;
6157 
6158 	memset(rdata, 0, sizeof(*rdata));
6159 
6160 	/* Fill the ramrod data with provided parameters */
6161 	if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
6162 		     &switch_update_params->changes)) {
6163 		rdata->tx_switch_suspend_change_flg = 1;
6164 		rdata->tx_switch_suspend =
6165 			test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
6166 				 &switch_update_params->changes);
6167 	}
6168 
6169 	if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
6170 		     &switch_update_params->changes)) {
6171 		rdata->sd_vlan_tag_change_flg = 1;
6172 		rdata->sd_vlan_tag =
6173 			cpu_to_le16(switch_update_params->vlan);
6174 	}
6175 
6176 	if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
6177 		     &switch_update_params->changes)) {
6178 		rdata->sd_vlan_eth_type_change_flg = 1;
6179 		rdata->sd_vlan_eth_type =
6180 			cpu_to_le16(switch_update_params->vlan_eth_type);
6181 	}
6182 
6183 	if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
6184 		     &switch_update_params->changes)) {
6185 		rdata->sd_vlan_force_pri_change_flg = 1;
6186 		if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
6187 			     &switch_update_params->changes))
6188 			rdata->sd_vlan_force_pri_flg = 1;
6189 		rdata->sd_vlan_force_pri_flg =
6190 			switch_update_params->vlan_force_prio;
6191 	}
6192 
6193 	if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
6194 		     &switch_update_params->changes)) {
6195 		rdata->update_tunn_cfg_flg = 1;
6196 		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
6197 			     &switch_update_params->changes))
6198 			rdata->inner_clss_l2gre = 1;
6199 		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
6200 			     &switch_update_params->changes))
6201 			rdata->inner_clss_vxlan = 1;
6202 		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
6203 			     &switch_update_params->changes))
6204 			rdata->inner_clss_l2geneve = 1;
6205 		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
6206 			     &switch_update_params->changes))
6207 			rdata->inner_rss = 1;
6208 		rdata->vxlan_dst_port =
6209 			cpu_to_le16(switch_update_params->vxlan_dst_port);
6210 		rdata->geneve_dst_port =
6211 			cpu_to_le16(switch_update_params->geneve_dst_port);
6212 	}
6213 
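	/* The echo field lets the completion path distinguish this
	 * sub-command from other FUNCTION_UPDATE ramrods (e.g. AFEX_UPDATE).
	 */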
6214 	rdata->echo = SWITCH_UPDATE;
6215 
6216 	/* No need for an explicit memory barrier here as long as we
6217 	 * ensure the ordering of writing to the SPQ element
6218 	 * and updating of the SPQ producer which involves a memory
6219 	 * read. If the memory read is removed we will have to put a
6220 	 * full memory barrier there (inside bnx2x_sp_post()).
6221 	 */
6222 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6223 			     U64_HI(data_mapping),
6224 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6225 }
6226 
6227 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
6228 					 struct bnx2x_func_state_params *params)
6229 {
6230 	struct bnx2x_func_sp_obj *o = params->f_obj;
6231 	struct function_update_data *rdata =
6232 		(struct function_update_data *)o->afex_rdata;
6233 	dma_addr_t data_mapping = o->afex_rdata_mapping;
6234 	struct bnx2x_func_afex_update_params *afex_update_params =
6235 		&params->params.afex_update;
6236 
6237 	memset(rdata, 0, sizeof(*rdata));
6238 
6239 	/* Fill the ramrod data with provided parameters */
6240 	rdata->vif_id_change_flg = 1;
6241 	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
6242 	rdata->afex_default_vlan_change_flg = 1;
6243 	rdata->afex_default_vlan =
6244 		cpu_to_le16(afex_update_params->afex_default_vlan);
6245 	rdata->allowed_priorities_change_flg = 1;
6246 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
6247 	rdata->echo = AFEX_UPDATE;
6248 
6249 	/* No need for an explicit memory barrier here as long as we
6250 	 * ensure the ordering of writing to the SPQ element
6251 	 * and updating of the SPQ producer which involves a memory
6252 	 * read. If the memory read is removed we will have to put a
6253 	 * full memory barrier there (inside bnx2x_sp_post()).
6254 	 */
6255 	DP(BNX2X_MSG_SP,
6256 	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6257 	   rdata->vif_id,
6258 	   rdata->afex_default_vlan, rdata->allowed_priorities);
6259 
6260 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6261 			     U64_HI(data_mapping),
6262 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6263 }
6264 
6265 static
6266 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
6267 					 struct bnx2x_func_state_params *params)
6268 {
6269 	struct bnx2x_func_sp_obj *o = params->f_obj;
6270 	struct afex_vif_list_ramrod_data *rdata =
6271 		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
6272 	struct bnx2x_func_afex_viflists_params *afex_vif_params =
6273 		&params->params.afex_viflists;
6274 	u64 *p_rdata = (u64 *)rdata;
6275 
6276 	memset(rdata, 0, sizeof(*rdata));
6277 
6278 	/* Fill the ramrod data with provided parameters */
6279 	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
6280 	rdata->func_bit_map          = afex_vif_params->func_bit_map;
6281 	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6282 	rdata->func_to_clear         = afex_vif_params->func_to_clear;
6283 
6284 	/* send in echo type of sub command */
6285 	rdata->echo = afex_vif_params->afex_vif_list_command;
6286 
6287 	/* No need for an explicit memory barrier here as long as we
6288 	 * ensure the ordering of writing to the SPQ element
6289 	 * and updating of the SPQ producer which involves a memory
6290 	 * read. If the memory read is removed we will have to put a
6291 	 * full memory barrier there (inside bnx2x_sp_post()).
6292 	 */
6293 
6294 	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6295 	   rdata->afex_vif_list_command, rdata->vif_list_index,
6296 	   rdata->func_bit_map, rdata->func_to_clear);
6297 
6298 	/* this ramrod sends data directly and not through DMA mapping */
6299 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6300 			     U64_HI(*p_rdata), U64_LO(*p_rdata),
6301 			     NONE_CONNECTION_TYPE);
6302 }
6303 
6304 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
6305 				       struct bnx2x_func_state_params *params)
6306 {
6307 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
6308 			     NONE_CONNECTION_TYPE);
6309 }
6310 
6311 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
6312 				       struct bnx2x_func_state_params *params)
6313 {
6314 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
6315 			     NONE_CONNECTION_TYPE);
6316 }

6317 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
6318 				       struct bnx2x_func_state_params *params)
6319 {
6320 	struct bnx2x_func_sp_obj *o = params->f_obj;
6321 	struct flow_control_configuration *rdata =
6322 		(struct flow_control_configuration *)o->rdata;
6323 	dma_addr_t data_mapping = o->rdata_mapping;
6324 	struct bnx2x_func_tx_start_params *tx_start_params =
6325 		&params->params.tx_start;
6326 	int i;
6327 
6328 	memset(rdata, 0, sizeof(*rdata));
6329 
6330 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6331 	rdata->dcb_version = tx_start_params->dcb_version;
6332 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
6333 
6334 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6335 		rdata->traffic_type_to_priority_cos[i] =
6336 			tx_start_params->traffic_type_to_priority_cos[i];
6337 
6338 	for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
6339 		rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
6340 	/* No need for an explicit memory barrier here as long as we
6341 	 * ensure the ordering of writing to the SPQ element
6342 	 * and updating of the SPQ producer which involves a memory
6343 	 * read. If the memory read is removed we will have to put a
6344 	 * full memory barrier there (inside bnx2x_sp_post()).
6345 	 */
6346 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6347 			     U64_HI(data_mapping),
6348 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6349 }
6350 
6351 static inline
6352 int bnx2x_func_send_set_timesync(struct bnx2x *bp,
6353 				 struct bnx2x_func_state_params *params)
6354 {
6355 	struct bnx2x_func_sp_obj *o = params->f_obj;
6356 	struct set_timesync_ramrod_data *rdata =
6357 		(struct set_timesync_ramrod_data *)o->rdata;
6358 	dma_addr_t data_mapping = o->rdata_mapping;
6359 	struct bnx2x_func_set_timesync_params *set_timesync_params =
6360 		&params->params.set_timesync;
6361 
6362 	memset(rdata, 0, sizeof(*rdata));
6363 
6364 	/* Fill the ramrod data with provided parameters */
6365 	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
6366 	rdata->offset_cmd = set_timesync_params->offset_cmd;
6367 	rdata->add_sub_drift_adjust_value =
6368 		set_timesync_params->add_sub_drift_adjust_value;
6369 	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
6370 	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
6371 	rdata->offset_delta.lo =
6372 		cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
6373 	rdata->offset_delta.hi =
6374 		cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
6375 
6376 	DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
6377 	   rdata->drift_adjust_cmd, rdata->offset_cmd,
6378 	   rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
6379 	   rdata->drift_adjust_period, rdata->offset_delta.lo,
6380 	   rdata->offset_delta.hi);
6381 
6382 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
6383 			     U64_HI(data_mapping),
6384 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6385 }
6386 
6387 static int bnx2x_func_send_cmd(struct bnx2x *bp,
6388 			       struct bnx2x_func_state_params *params)
6389 {
6390 	switch (params->cmd) {
6391 	case BNX2X_F_CMD_HW_INIT:
6392 		return bnx2x_func_hw_init(bp, params);
6393 	case BNX2X_F_CMD_START:
6394 		return bnx2x_func_send_start(bp, params);
6395 	case BNX2X_F_CMD_STOP:
6396 		return bnx2x_func_send_stop(bp, params);
6397 	case BNX2X_F_CMD_HW_RESET:
6398 		return bnx2x_func_hw_reset(bp, params);
6399 	case BNX2X_F_CMD_AFEX_UPDATE:
6400 		return bnx2x_func_send_afex_update(bp, params);
6401 	case BNX2X_F_CMD_AFEX_VIFLISTS:
6402 		return bnx2x_func_send_afex_viflists(bp, params);
6403 	case BNX2X_F_CMD_TX_STOP:
6404 		return bnx2x_func_send_tx_stop(bp, params);
6405 	case BNX2X_F_CMD_TX_START:
6406 		return bnx2x_func_send_tx_start(bp, params);
6407 	case BNX2X_F_CMD_SWITCH_UPDATE:
6408 		return bnx2x_func_send_switch_update(bp, params);
6409 	case BNX2X_F_CMD_SET_TIMESYNC:
6410 		return bnx2x_func_send_set_timesync(bp, params);
6411 	default:
6412 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
6413 		return -EINVAL;
6414 	}
6415 }
6416 
6417 void bnx2x_init_func_obj(struct bnx2x *bp,
6418 			 struct bnx2x_func_sp_obj *obj,
6419 			 void *rdata, dma_addr_t rdata_mapping,
6420 			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
6421 			 struct bnx2x_func_sp_drv_ops *drv_iface)
6422 {
6423 	memset(obj, 0, sizeof(*obj));
6424 
6425 	mutex_init(&obj->one_pending_mutex);
6426 
6427 	obj->rdata = rdata;
6428 	obj->rdata_mapping = rdata_mapping;
6429 	obj->afex_rdata = afex_rdata;
6430 	obj->afex_rdata_mapping = afex_rdata_mapping;
6431 	obj->send_cmd = bnx2x_func_send_cmd;
6432 	obj->check_transition = bnx2x_func_chk_transition;
6433 	obj->complete_cmd = bnx2x_func_comp_cmd;
6434 	obj->wait_comp = bnx2x_func_wait_comp;
6435 
6436 	obj->drv = drv_iface;
6437 }
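
/* Hypothetical caller sketch (field values and the f_obj pointer are
 * placeholders, not taken from the driver): a synchronous FUNCTION_START
 * request through the state machine below would look roughly like
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = f_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	func_params.params.start.mf_mode = mf_mode;
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */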
6438 
6439 /**
6440  * bnx2x_func_state_change - perform Function state change transition
6441  *
6442  * @bp:		device handle
6443  * @params:	parameters to perform the transaction
6444  *
6445  * returns 0 in case of successfully completed transition,
6446  *         negative error code in case of failure, positive
6447  *         (EBUSY) value if a completion for this transition is
6448  *         still pending (possible only if RAMROD_COMP_WAIT is
6449  *         not set in params->ramrod_flags for asynchronous
6450  *         commands).
6451  */
6452 int bnx2x_func_state_change(struct bnx2x *bp,
6453 			    struct bnx2x_func_state_params *params)
6454 {
6455 	struct bnx2x_func_sp_obj *o = params->f_obj;
6456 	int rc, cnt = 300;
6457 	enum bnx2x_func_cmd cmd = params->cmd;
6458 	unsigned long *pending = &o->pending;
6459 
6460 	mutex_lock(&o->one_pending_mutex);
6461 
6462 	/* Check that the requested transition is legal */
6463 	rc = o->check_transition(bp, o, params);
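	/* With RAMROD_RETRY set, a busy state machine is re-checked every
	 * 10ms, up to 300 times (~3 seconds), before giving up.
	 */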
6464 	if ((rc == -EBUSY) &&
6465 	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
6466 		while ((rc == -EBUSY) && (--cnt > 0)) {
6467 			mutex_unlock(&o->one_pending_mutex);
6468 			msleep(10);
6469 			mutex_lock(&o->one_pending_mutex);
6470 			rc = o->check_transition(bp, o, params);
6471 		}
6472 		if (rc == -EBUSY) {
6473 			mutex_unlock(&o->one_pending_mutex);
6474 			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
6475 			return rc;
6476 		}
6477 	} else if (rc) {
6478 		mutex_unlock(&o->one_pending_mutex);
6479 		return rc;
6480 	}
6481 
6482 	/* Set "pending" bit */
6483 	set_bit(cmd, pending);
6484 
6485 	/* Don't send a command if only driver cleanup was requested */
6486 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6487 		bnx2x_func_state_change_comp(bp, o, cmd);
6488 		mutex_unlock(&o->one_pending_mutex);
6489 	} else {
6490 		/* Send a ramrod */
6491 		rc = o->send_cmd(bp, params);
6492 
6493 		mutex_unlock(&o->one_pending_mutex);
6494 
6495 		if (rc) {
6496 			o->next_state = BNX2X_F_STATE_MAX;
6497 			clear_bit(cmd, pending);
6498 			smp_mb__after_atomic();
6499 			return rc;
6500 		}
6501 
6502 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6503 			rc = o->wait_comp(bp, o, cmd);
6504 			if (rc)
6505 				return rc;
6506 
6507 			return 0;
6508 		}
6509 	}
6510 
6511 	return !!test_bit(cmd, pending);
6512 }
6513