1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30 
31 #define BNX2X_MAX_EMUL_MULTI		16
32 
33 /**** Exe Queue interfaces ****/
34 
35 /**
36  * bnx2x_exe_queue_init - init the Exe Queue object
37  *
38  * @o:		pointer to the object
39  * @exe_len:	length
40  * @owner:	pointer to the owner
41  * @validate:	validate function pointer
42  * @optimize:	optimize function pointer
43  * @exec:	execute function pointer
44  * @get:	get function pointer
45  */
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47 					struct bnx2x_exe_queue_obj *o,
48 					int exe_len,
49 					union bnx2x_qable_obj *owner,
50 					exe_q_validate validate,
51 					exe_q_remove remove,
52 					exe_q_optimize optimize,
53 					exe_q_execute exec,
54 					exe_q_get get)
55 {
56 	memset(o, 0, sizeof(*o));
57 
58 	INIT_LIST_HEAD(&o->exe_queue);
59 	INIT_LIST_HEAD(&o->pending_comp);
60 
61 	spin_lock_init(&o->lock);
62 
63 	o->exe_chunk_len = exe_len;
64 	o->owner         = owner;
65 
66 	/* Owner specific callbacks */
67 	o->validate      = validate;
68 	o->remove        = remove;
69 	o->optimize      = optimize;
70 	o->execute       = exec;
71 	o->get           = get;
72 
73 	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
74 	   exe_len);
75 }
76 
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 					     struct bnx2x_exeq_elem *elem)
79 {
80 	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
81 	kfree(elem);
82 }
83 
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
85 {
86 	struct bnx2x_exeq_elem *elem;
87 	int cnt = 0;
88 
89 	spin_lock_bh(&o->lock);
90 
91 	list_for_each_entry(elem, &o->exe_queue, link)
92 		cnt++;
93 
94 	spin_unlock_bh(&o->lock);
95 
96 	return cnt;
97 }
98 
99 /**
100  * bnx2x_exe_queue_add - add a new element to the execution queue
101  *
102  * @bp:		driver handle
103  * @o:		queue
104  * @cmd:	new command to add
105  * @restore:	true - do not optimize the command
106  *
107  * If the element is optimized or is illegal, frees it.
108  */
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 				      struct bnx2x_exe_queue_obj *o,
111 				      struct bnx2x_exeq_elem *elem,
112 				      bool restore)
113 {
114 	int rc;
115 
116 	spin_lock_bh(&o->lock);
117 
118 	if (!restore) {
119 		/* Try to cancel this element queue */
120 		rc = o->optimize(bp, o->owner, elem);
121 		if (rc)
122 			goto free_and_exit;
123 
124 		/* Check if this request is ok */
125 		rc = o->validate(bp, o->owner, elem);
126 		if (rc) {
127 			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
128 			goto free_and_exit;
129 		}
130 	}
131 
132 	/* If so, add it to the execution queue */
133 	list_add_tail(&elem->link, &o->exe_queue);
134 
135 	spin_unlock_bh(&o->lock);
136 
137 	return 0;
138 
139 free_and_exit:
140 	bnx2x_exe_queue_free_elem(bp, elem);
141 
142 	spin_unlock_bh(&o->lock);
143 
144 	return rc;
145 }
146 
147 static inline void __bnx2x_exe_queue_reset_pending(
148 	struct bnx2x *bp,
149 	struct bnx2x_exe_queue_obj *o)
150 {
151 	struct bnx2x_exeq_elem *elem;
152 
153 	while (!list_empty(&o->pending_comp)) {
154 		elem = list_first_entry(&o->pending_comp,
155 					struct bnx2x_exeq_elem, link);
156 
157 		list_del(&elem->link);
158 		bnx2x_exe_queue_free_elem(bp, elem);
159 	}
160 }
161 
162 /**
163  * bnx2x_exe_queue_step - execute one execution chunk atomically
164  *
165  * @bp:			driver handle
166  * @o:			queue
167  * @ramrod_flags:	flags
168  *
169  * (Should be called while holding the exe_queue->lock).
170  */
171 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
172 				       struct bnx2x_exe_queue_obj *o,
173 				       unsigned long *ramrod_flags)
174 {
175 	struct bnx2x_exeq_elem *elem, spacer;
176 	int cur_len = 0, rc;
177 
178 	memset(&spacer, 0, sizeof(spacer));
179 
180 	/* Next step should not be performed until the current is finished,
181 	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
182 	 * properly clear object internals without sending any command to the FW
183 	 * which also implies there won't be any completion to clear the
184 	 * 'pending' list.
185 	 */
186 	if (!list_empty(&o->pending_comp)) {
187 		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
188 			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
189 			__bnx2x_exe_queue_reset_pending(bp, o);
190 		} else {
191 			return 1;
192 		}
193 	}
194 
195 	/* Run through the pending commands list and create a next
196 	 * execution chunk.
197 	 */
198 	while (!list_empty(&o->exe_queue)) {
199 		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
200 					link);
201 		WARN_ON(!elem->cmd_len);
202 
203 		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
204 			cur_len += elem->cmd_len;
205 			/* Prevent from both lists being empty when moving an
206 			 * element. This will allow the call of
207 			 * bnx2x_exe_queue_empty() without locking.
208 			 */
209 			list_add_tail(&spacer.link, &o->pending_comp);
210 			mb();
211 			list_move_tail(&elem->link, &o->pending_comp);
212 			list_del(&spacer.link);
213 		} else
214 			break;
215 	}
216 
217 	/* Sanity check */
218 	if (!cur_len)
219 		return 0;
220 
221 	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
222 	if (rc < 0)
223 		/* In case of an error return the commands back to the queue
224 		 * and reset the pending_comp.
225 		 */
226 		list_splice_init(&o->pending_comp, &o->exe_queue);
227 	else if (!rc)
228 		/* If zero is returned, means there are no outstanding pending
229 		 * completions and we may dismiss the pending list.
230 		 */
231 		__bnx2x_exe_queue_reset_pending(bp, o);
232 
233 	return rc;
234 }
235 
236 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
237 {
238 	bool empty = list_empty(&o->exe_queue);
239 
240 	/* Don't reorder!!! */
241 	mb();
242 
243 	return empty && list_empty(&o->pending_comp);
244 }
245 
246 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
247 	struct bnx2x *bp)
248 {
249 	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
250 	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
251 }
252 
253 /************************ raw_obj functions ***********************************/
254 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
255 {
256 	return !!test_bit(o->state, o->pstate);
257 }
258 
259 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
260 {
261 	smp_mb__before_clear_bit();
262 	clear_bit(o->state, o->pstate);
263 	smp_mb__after_clear_bit();
264 }
265 
266 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
267 {
268 	smp_mb__before_clear_bit();
269 	set_bit(o->state, o->pstate);
270 	smp_mb__after_clear_bit();
271 }
272 
273 /**
274  * bnx2x_state_wait - wait until the given bit(state) is cleared
275  *
276  * @bp:		device handle
277  * @state:	state which is to be cleared
278  * @state_p:	state buffer
279  *
280  */
281 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
282 				   unsigned long *pstate)
283 {
284 	/* can take a while if any port is running */
285 	int cnt = 5000;
286 
287 	if (CHIP_REV_IS_EMUL(bp))
288 		cnt *= 20;
289 
290 	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
291 
292 	might_sleep();
293 	while (cnt--) {
294 		if (!test_bit(state, pstate)) {
295 #ifdef BNX2X_STOP_ON_ERROR
296 			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
297 #endif
298 			return 0;
299 		}
300 
301 		usleep_range(1000, 2000);
302 
303 		if (bp->panic)
304 			return -EIO;
305 	}
306 
307 	/* timeout! */
308 	BNX2X_ERR("timeout waiting for state %d\n", state);
309 #ifdef BNX2X_STOP_ON_ERROR
310 	bnx2x_panic();
311 #endif
312 
313 	return -EBUSY;
314 }
315 
316 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
317 {
318 	return bnx2x_state_wait(bp, raw->state, raw->pstate);
319 }
320 
321 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
322 /* credit handling callbacks */
323 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
324 {
325 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
326 
327 	WARN_ON(!mp);
328 
329 	return mp->get_entry(mp, offset);
330 }
331 
332 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
333 {
334 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
335 
336 	WARN_ON(!mp);
337 
338 	return mp->get(mp, 1);
339 }
340 
341 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
342 {
343 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
344 
345 	WARN_ON(!vp);
346 
347 	return vp->get_entry(vp, offset);
348 }
349 
350 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
351 {
352 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
353 
354 	WARN_ON(!vp);
355 
356 	return vp->get(vp, 1);
357 }
358 
359 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
360 {
361 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
363 
364 	if (!mp->get(mp, 1))
365 		return false;
366 
367 	if (!vp->get(vp, 1)) {
368 		mp->put(mp, 1);
369 		return false;
370 	}
371 
372 	return true;
373 }
374 
375 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
376 {
377 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
378 
379 	return mp->put_entry(mp, offset);
380 }
381 
382 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
383 {
384 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
385 
386 	return mp->put(mp, 1);
387 }
388 
389 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
390 {
391 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
392 
393 	return vp->put_entry(vp, offset);
394 }
395 
396 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
397 {
398 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
399 
400 	return vp->put(vp, 1);
401 }
402 
403 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
404 {
405 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
406 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
407 
408 	if (!mp->put(mp, 1))
409 		return false;
410 
411 	if (!vp->put(vp, 1)) {
412 		mp->get(mp, 1);
413 		return false;
414 	}
415 
416 	return true;
417 }
418 
419 /**
420  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
421  *
422  * @bp:		device handle
423  * @o:		vlan_mac object
424  *
425  * @details: Non-blocking implementation; should be called under execution
426  *           queue lock.
427  */
428 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
429 					    struct bnx2x_vlan_mac_obj *o)
430 {
431 	if (o->head_reader) {
432 		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
433 		return -EBUSY;
434 	}
435 
436 	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
437 	return 0;
438 }
439 
440 /**
441  * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
442  *
443  * @bp:		device handle
444  * @o:		vlan_mac object
445  *
446  * @details Should be called under execution queue lock; notice it might release
447  *          and reclaim it during its run.
448  */
449 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
450 					    struct bnx2x_vlan_mac_obj *o)
451 {
452 	int rc;
453 	unsigned long ramrod_flags = o->saved_ramrod_flags;
454 
455 	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
456 	   ramrod_flags);
457 	o->head_exe_request = false;
458 	o->saved_ramrod_flags = 0;
459 	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
460 	if (rc != 0) {
461 		BNX2X_ERR("execution of pending commands failed with rc %d\n",
462 			  rc);
463 #ifdef BNX2X_STOP_ON_ERROR
464 		bnx2x_panic();
465 #endif
466 	}
467 }
468 
469 /**
470  * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
471  *
472  * @bp:			device handle
473  * @o:			vlan_mac object
474  * @ramrod_flags:	ramrod flags of missed execution
475  *
476  * @details Should be called under execution queue lock.
477  */
478 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
479 				    struct bnx2x_vlan_mac_obj *o,
480 				    unsigned long ramrod_flags)
481 {
482 	o->head_exe_request = true;
483 	o->saved_ramrod_flags = ramrod_flags;
484 	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
485 	   ramrod_flags);
486 }
487 
488 /**
489  * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
490  *
491  * @bp:			device handle
492  * @o:			vlan_mac object
493  *
494  * @details Should be called under execution queue lock. Notice if a pending
495  *          execution exists, it would perform it - possibly releasing and
496  *          reclaiming the execution queue lock.
497  */
498 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
499 					    struct bnx2x_vlan_mac_obj *o)
500 {
501 	/* It's possible a new pending execution was added since this writer
502 	 * executed. If so, execute again. [Ad infinitum]
503 	 */
504 	while (o->head_exe_request) {
505 		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
506 		__bnx2x_vlan_mac_h_exec_pending(bp, o);
507 	}
508 }
509 
510 /**
511  * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
512  *
513  * @bp:			device handle
514  * @o:			vlan_mac object
515  *
516  * @details Notice if a pending execution exists, it would perform it -
517  *          possibly releasing and reclaiming the execution queue lock.
518  */
519 void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
520 				   struct bnx2x_vlan_mac_obj *o)
521 {
522 	spin_lock_bh(&o->exe_queue.lock);
523 	__bnx2x_vlan_mac_h_write_unlock(bp, o);
524 	spin_unlock_bh(&o->exe_queue.lock);
525 }
526 
527 /**
528  * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
529  *
530  * @bp:			device handle
531  * @o:			vlan_mac object
532  *
533  * @details Should be called under the execution queue lock. May sleep. May
534  *          release and reclaim execution queue lock during its run.
535  */
536 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
537 					struct bnx2x_vlan_mac_obj *o)
538 {
539 	/* If we got here, we're holding lock --> no WRITER exists */
540 	o->head_reader++;
541 	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
542 	   o->head_reader);
543 
544 	return 0;
545 }
546 
547 /**
548  * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
549  *
550  * @bp:			device handle
551  * @o:			vlan_mac object
552  *
553  * @details May sleep. Claims and releases execution queue lock during its run.
554  */
555 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
556 			       struct bnx2x_vlan_mac_obj *o)
557 {
558 	int rc;
559 
560 	spin_lock_bh(&o->exe_queue.lock);
561 	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
562 	spin_unlock_bh(&o->exe_queue.lock);
563 
564 	return rc;
565 }
566 
567 /**
568  * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
569  *
570  * @bp:			device handle
571  * @o:			vlan_mac object
572  *
573  * @details Should be called under execution queue lock. Notice if a pending
574  *          execution exists, it would be performed if this was the last
575  *          reader. possibly releasing and reclaiming the execution queue lock.
576  */
577 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
578 					  struct bnx2x_vlan_mac_obj *o)
579 {
580 	if (!o->head_reader) {
581 		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
582 #ifdef BNX2X_STOP_ON_ERROR
583 		bnx2x_panic();
584 #endif
585 	} else {
586 		o->head_reader--;
587 		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
588 		   o->head_reader);
589 	}
590 
591 	/* It's possible a new pending execution was added, and that this reader
592 	 * was last - if so we need to execute the command.
593 	 */
594 	if (!o->head_reader && o->head_exe_request) {
595 		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
596 
597 		/* Writer release will do the trick */
598 		__bnx2x_vlan_mac_h_write_unlock(bp, o);
599 	}
600 }
601 
602 /**
603  * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
604  *
605  * @bp:			device handle
606  * @o:			vlan_mac object
607  *
608  * @details Notice if a pending execution exists, it would be performed if this
609  *          was the last reader. Claims and releases the execution queue lock
610  *          during its run.
611  */
612 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
613 				  struct bnx2x_vlan_mac_obj *o)
614 {
615 	spin_lock_bh(&o->exe_queue.lock);
616 	__bnx2x_vlan_mac_h_read_unlock(bp, o);
617 	spin_unlock_bh(&o->exe_queue.lock);
618 }
619 
620 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
621 				int n, u8 *base, u8 stride, u8 size)
622 {
623 	struct bnx2x_vlan_mac_registry_elem *pos;
624 	u8 *next = base;
625 	int counter = 0;
626 	int read_lock;
627 
628 	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
629 	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
630 	if (read_lock != 0)
631 		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
632 
633 	/* traverse list */
634 	list_for_each_entry(pos, &o->head, link) {
635 		if (counter < n) {
636 			memcpy(next, &pos->u, size);
637 			counter++;
638 			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
639 			   counter, next);
640 			next += stride + size;
641 		}
642 	}
643 
644 	if (read_lock == 0) {
645 		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
646 		bnx2x_vlan_mac_h_read_unlock(bp, o);
647 	}
648 
649 	return counter * ETH_ALEN;
650 }
651 
652 /* check_add() callbacks */
653 static int bnx2x_check_mac_add(struct bnx2x *bp,
654 			       struct bnx2x_vlan_mac_obj *o,
655 			       union bnx2x_classification_ramrod_data *data)
656 {
657 	struct bnx2x_vlan_mac_registry_elem *pos;
658 
659 	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
660 
661 	if (!is_valid_ether_addr(data->mac.mac))
662 		return -EINVAL;
663 
664 	/* Check if a requested MAC already exists */
665 	list_for_each_entry(pos, &o->head, link)
666 		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
667 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
668 			return -EEXIST;
669 
670 	return 0;
671 }
672 
673 static int bnx2x_check_vlan_add(struct bnx2x *bp,
674 				struct bnx2x_vlan_mac_obj *o,
675 				union bnx2x_classification_ramrod_data *data)
676 {
677 	struct bnx2x_vlan_mac_registry_elem *pos;
678 
679 	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
680 
681 	list_for_each_entry(pos, &o->head, link)
682 		if (data->vlan.vlan == pos->u.vlan.vlan)
683 			return -EEXIST;
684 
685 	return 0;
686 }
687 
688 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
689 				    struct bnx2x_vlan_mac_obj *o,
690 				   union bnx2x_classification_ramrod_data *data)
691 {
692 	struct bnx2x_vlan_mac_registry_elem *pos;
693 
694 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
695 	   data->vlan_mac.mac, data->vlan_mac.vlan);
696 
697 	list_for_each_entry(pos, &o->head, link)
698 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
699 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
700 				  ETH_ALEN)) &&
701 		    (data->vlan_mac.is_inner_mac ==
702 		     pos->u.vlan_mac.is_inner_mac))
703 			return -EEXIST;
704 
705 	return 0;
706 }
707 
708 /* check_del() callbacks */
709 static struct bnx2x_vlan_mac_registry_elem *
710 	bnx2x_check_mac_del(struct bnx2x *bp,
711 			    struct bnx2x_vlan_mac_obj *o,
712 			    union bnx2x_classification_ramrod_data *data)
713 {
714 	struct bnx2x_vlan_mac_registry_elem *pos;
715 
716 	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
717 
718 	list_for_each_entry(pos, &o->head, link)
719 		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
720 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
721 			return pos;
722 
723 	return NULL;
724 }
725 
726 static struct bnx2x_vlan_mac_registry_elem *
727 	bnx2x_check_vlan_del(struct bnx2x *bp,
728 			     struct bnx2x_vlan_mac_obj *o,
729 			     union bnx2x_classification_ramrod_data *data)
730 {
731 	struct bnx2x_vlan_mac_registry_elem *pos;
732 
733 	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
734 
735 	list_for_each_entry(pos, &o->head, link)
736 		if (data->vlan.vlan == pos->u.vlan.vlan)
737 			return pos;
738 
739 	return NULL;
740 }
741 
742 static struct bnx2x_vlan_mac_registry_elem *
743 	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
744 				 struct bnx2x_vlan_mac_obj *o,
745 				 union bnx2x_classification_ramrod_data *data)
746 {
747 	struct bnx2x_vlan_mac_registry_elem *pos;
748 
749 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
750 	   data->vlan_mac.mac, data->vlan_mac.vlan);
751 
752 	list_for_each_entry(pos, &o->head, link)
753 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
754 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
755 			     ETH_ALEN)) &&
756 		    (data->vlan_mac.is_inner_mac ==
757 		     pos->u.vlan_mac.is_inner_mac))
758 			return pos;
759 
760 	return NULL;
761 }
762 
763 /* check_move() callback */
764 static bool bnx2x_check_move(struct bnx2x *bp,
765 			     struct bnx2x_vlan_mac_obj *src_o,
766 			     struct bnx2x_vlan_mac_obj *dst_o,
767 			     union bnx2x_classification_ramrod_data *data)
768 {
769 	struct bnx2x_vlan_mac_registry_elem *pos;
770 	int rc;
771 
772 	/* Check if we can delete the requested configuration from the first
773 	 * object.
774 	 */
775 	pos = src_o->check_del(bp, src_o, data);
776 
777 	/*  check if configuration can be added */
778 	rc = dst_o->check_add(bp, dst_o, data);
779 
780 	/* If this classification can not be added (is already set)
781 	 * or can't be deleted - return an error.
782 	 */
783 	if (rc || !pos)
784 		return false;
785 
786 	return true;
787 }
788 
789 static bool bnx2x_check_move_always_err(
790 	struct bnx2x *bp,
791 	struct bnx2x_vlan_mac_obj *src_o,
792 	struct bnx2x_vlan_mac_obj *dst_o,
793 	union bnx2x_classification_ramrod_data *data)
794 {
795 	return false;
796 }
797 
798 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
799 {
800 	struct bnx2x_raw_obj *raw = &o->raw;
801 	u8 rx_tx_flag = 0;
802 
803 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
804 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
805 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
806 
807 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
808 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
809 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
810 
811 	return rx_tx_flag;
812 }
813 
814 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
815 			  bool add, unsigned char *dev_addr, int index)
816 {
817 	u32 wb_data[2];
818 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
819 			 NIG_REG_LLH0_FUNC_MEM;
820 
821 	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
822 		return;
823 
824 	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
825 		return;
826 
827 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
828 			 (add ? "ADD" : "DELETE"), index);
829 
830 	if (add) {
831 		/* LLH_FUNC_MEM is a u64 WB register */
832 		reg_offset += 8*index;
833 
834 		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
835 			      (dev_addr[4] <<  8) |  dev_addr[5]);
836 		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
837 
838 		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
839 	}
840 
841 	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
842 				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
843 }
844 
845 /**
846  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
847  *
848  * @bp:		device handle
849  * @o:		queue for which we want to configure this rule
850  * @add:	if true the command is an ADD command, DEL otherwise
851  * @opcode:	CLASSIFY_RULE_OPCODE_XXX
852  * @hdr:	pointer to a header to setup
853  *
854  */
855 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
856 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
857 	struct eth_classify_cmd_header *hdr)
858 {
859 	struct bnx2x_raw_obj *raw = &o->raw;
860 
861 	hdr->client_id = raw->cl_id;
862 	hdr->func_id = raw->func_id;
863 
864 	/* Rx or/and Tx (internal switching) configuration ? */
865 	hdr->cmd_general_data |=
866 		bnx2x_vlan_mac_get_rx_tx_flag(o);
867 
868 	if (add)
869 		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
870 
871 	hdr->cmd_general_data |=
872 		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
873 }
874 
875 /**
876  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
877  *
878  * @cid:	connection id
879  * @type:	BNX2X_FILTER_XXX_PENDING
880  * @hdr:	pointer to header to setup
881  * @rule_cnt:
882  *
883  * currently we always configure one rule and echo field to contain a CID and an
884  * opcode type.
885  */
886 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
887 				struct eth_classify_header *hdr, int rule_cnt)
888 {
889 	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
890 				(type << BNX2X_SWCID_SHIFT));
891 	hdr->rule_cnt = (u8)rule_cnt;
892 }
893 
894 /* hw_config() callbacks */
895 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
896 				 struct bnx2x_vlan_mac_obj *o,
897 				 struct bnx2x_exeq_elem *elem, int rule_idx,
898 				 int cam_offset)
899 {
900 	struct bnx2x_raw_obj *raw = &o->raw;
901 	struct eth_classify_rules_ramrod_data *data =
902 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
903 	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
904 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
905 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
906 	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
907 	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
908 
909 	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
910 	 * relevant. In addition, current implementation is tuned for a
911 	 * single ETH MAC.
912 	 *
913 	 * When multiple unicast ETH MACs PF configuration in switch
914 	 * independent mode is required (NetQ, multiple netdev MACs,
915 	 * etc.), consider better utilisation of 8 per function MAC
916 	 * entries in the LLH register. There is also
917 	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
918 	 * total number of CAM entries to 16.
919 	 *
920 	 * Currently we won't configure NIG for MACs other than a primary ETH
921 	 * MAC and iSCSI L2 MAC.
922 	 *
923 	 * If this MAC is moving from one Queue to another, no need to change
924 	 * NIG configuration.
925 	 */
926 	if (cmd != BNX2X_VLAN_MAC_MOVE) {
927 		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
928 			bnx2x_set_mac_in_nig(bp, add, mac,
929 					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
930 		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
931 			bnx2x_set_mac_in_nig(bp, add, mac,
932 					     BNX2X_LLH_CAM_ETH_LINE);
933 	}
934 
935 	/* Reset the ramrod data buffer for the first rule */
936 	if (rule_idx == 0)
937 		memset(data, 0, sizeof(*data));
938 
939 	/* Setup a command header */
940 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
941 				      &rule_entry->mac.header);
942 
943 	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
944 	   (add ? "add" : "delete"), mac, raw->cl_id);
945 
946 	/* Set a MAC itself */
947 	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
948 			      &rule_entry->mac.mac_mid,
949 			      &rule_entry->mac.mac_lsb, mac);
950 	rule_entry->mac.inner_mac =
951 		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
952 
953 	/* MOVE: Add a rule that will add this MAC to the target Queue */
954 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
955 		rule_entry++;
956 		rule_cnt++;
957 
958 		/* Setup ramrod data */
959 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
960 					elem->cmd_data.vlan_mac.target_obj,
961 					      true, CLASSIFY_RULE_OPCODE_MAC,
962 					      &rule_entry->mac.header);
963 
964 		/* Set a MAC itself */
965 		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
966 				      &rule_entry->mac.mac_mid,
967 				      &rule_entry->mac.mac_lsb, mac);
968 		rule_entry->mac.inner_mac =
969 			cpu_to_le16(elem->cmd_data.vlan_mac.
970 						u.mac.is_inner_mac);
971 	}
972 
973 	/* Set the ramrod data header */
974 	/* TODO: take this to the higher level in order to prevent multiple
975 		 writing */
976 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
977 					rule_cnt);
978 }
979 
980 /**
981  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
982  *
983  * @bp:		device handle
984  * @o:		queue
985  * @type:
986  * @cam_offset:	offset in cam memory
987  * @hdr:	pointer to a header to setup
988  *
989  * E1/E1H
990  */
991 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
992 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
993 	struct mac_configuration_hdr *hdr)
994 {
995 	struct bnx2x_raw_obj *r = &o->raw;
996 
997 	hdr->length = 1;
998 	hdr->offset = (u8)cam_offset;
999 	hdr->client_id = cpu_to_le16(0xff);
1000 	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
1001 				(type << BNX2X_SWCID_SHIFT));
1002 }
1003 
1004 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
1005 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
1006 	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
1007 {
1008 	struct bnx2x_raw_obj *r = &o->raw;
1009 	u32 cl_bit_vec = (1 << r->cl_id);
1010 
1011 	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
1012 	cfg_entry->pf_id = r->func_id;
1013 	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
1014 
1015 	if (add) {
1016 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1017 			 T_ETH_MAC_COMMAND_SET);
1018 		SET_FLAG(cfg_entry->flags,
1019 			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
1020 
1021 		/* Set a MAC in a ramrod data */
1022 		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1023 				      &cfg_entry->middle_mac_addr,
1024 				      &cfg_entry->lsb_mac_addr, mac);
1025 	} else
1026 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1027 			 T_ETH_MAC_COMMAND_INVALIDATE);
1028 }
1029 
1030 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
1031 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
1032 	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
1033 {
1034 	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1035 	struct bnx2x_raw_obj *raw = &o->raw;
1036 
1037 	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
1038 					 &config->hdr);
1039 	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
1040 					 cfg_entry);
1041 
1042 	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
1043 			 (add ? "setting" : "clearing"),
1044 			 mac, raw->cl_id, cam_offset);
1045 }
1046 
1047 /**
1048  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
1049  *
1050  * @bp:		device handle
1051  * @o:		bnx2x_vlan_mac_obj
1052  * @elem:	bnx2x_exeq_elem
1053  * @rule_idx:	rule_idx
1054  * @cam_offset: cam_offset
1055  */
1056 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
1057 				  struct bnx2x_vlan_mac_obj *o,
1058 				  struct bnx2x_exeq_elem *elem, int rule_idx,
1059 				  int cam_offset)
1060 {
1061 	struct bnx2x_raw_obj *raw = &o->raw;
1062 	struct mac_configuration_cmd *config =
1063 		(struct mac_configuration_cmd *)(raw->rdata);
1064 	/* 57710 and 57711 do not support MOVE command,
1065 	 * so it's either ADD or DEL
1066 	 */
1067 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1068 		true : false;
1069 
1070 	/* Reset the ramrod data buffer */
1071 	memset(config, 0, sizeof(*config));
1072 
1073 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
1074 				     cam_offset, add,
1075 				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
1076 				     ETH_VLAN_FILTER_ANY_VLAN, config);
1077 }
1078 
1079 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
1080 				  struct bnx2x_vlan_mac_obj *o,
1081 				  struct bnx2x_exeq_elem *elem, int rule_idx,
1082 				  int cam_offset)
1083 {
1084 	struct bnx2x_raw_obj *raw = &o->raw;
1085 	struct eth_classify_rules_ramrod_data *data =
1086 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1087 	int rule_cnt = rule_idx + 1;
1088 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1089 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1090 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1091 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1092 
1093 	/* Reset the ramrod data buffer for the first rule */
1094 	if (rule_idx == 0)
1095 		memset(data, 0, sizeof(*data));
1096 
1097 	/* Set a rule header */
1098 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1099 				      &rule_entry->vlan.header);
1100 
1101 	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1102 			 vlan);
1103 
1104 	/* Set a VLAN itself */
1105 	rule_entry->vlan.vlan = cpu_to_le16(vlan);
1106 
1107 	/* MOVE: Add a rule that will add this MAC to the target Queue */
1108 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
1109 		rule_entry++;
1110 		rule_cnt++;
1111 
1112 		/* Setup ramrod data */
1113 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1114 					elem->cmd_data.vlan_mac.target_obj,
1115 					      true, CLASSIFY_RULE_OPCODE_VLAN,
1116 					      &rule_entry->vlan.header);
1117 
1118 		/* Set a VLAN itself */
1119 		rule_entry->vlan.vlan = cpu_to_le16(vlan);
1120 	}
1121 
1122 	/* Set the ramrod data header */
1123 	/* TODO: take this to the higher level in order to prevent multiple
1124 		 writing */
1125 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1126 					rule_cnt);
1127 }
1128 
1129 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
1130 				      struct bnx2x_vlan_mac_obj *o,
1131 				      struct bnx2x_exeq_elem *elem,
1132 				      int rule_idx, int cam_offset)
1133 {
1134 	struct bnx2x_raw_obj *raw = &o->raw;
1135 	struct eth_classify_rules_ramrod_data *data =
1136 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1137 	int rule_cnt = rule_idx + 1;
1138 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1139 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1140 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1141 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1142 	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1143 
1144 	/* Reset the ramrod data buffer for the first rule */
1145 	if (rule_idx == 0)
1146 		memset(data, 0, sizeof(*data));
1147 
1148 	/* Set a rule header */
1149 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1150 				      &rule_entry->pair.header);
1151 
1152 	/* Set VLAN and MAC themselves */
1153 	rule_entry->pair.vlan = cpu_to_le16(vlan);
1154 	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1155 			      &rule_entry->pair.mac_mid,
1156 			      &rule_entry->pair.mac_lsb, mac);
1157 	rule_entry->pair.inner_mac =
1158 		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
1159 	/* MOVE: Add a rule that will add this MAC to the target Queue */
1160 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
1161 		rule_entry++;
1162 		rule_cnt++;
1163 
1164 		/* Setup ramrod data */
1165 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1166 					elem->cmd_data.vlan_mac.target_obj,
1167 					      true, CLASSIFY_RULE_OPCODE_PAIR,
1168 					      &rule_entry->pair.header);
1169 
1170 		/* Set a VLAN itself */
1171 		rule_entry->pair.vlan = cpu_to_le16(vlan);
1172 		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1173 				      &rule_entry->pair.mac_mid,
1174 				      &rule_entry->pair.mac_lsb, mac);
1175 		rule_entry->pair.inner_mac =
1176 			cpu_to_le16(elem->cmd_data.vlan_mac.u.
1177 						vlan_mac.is_inner_mac);
1178 	}
1179 
1180 	/* Set the ramrod data header */
1181 	/* TODO: take this to the higher level in order to prevent multiple
1182 		 writing */
1183 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1184 					rule_cnt);
1185 }
1186 
1187 /**
1188  * bnx2x_set_one_vlan_mac_e1h -
1189  *
1190  * @bp:		device handle
1191  * @o:		bnx2x_vlan_mac_obj
1192  * @elem:	bnx2x_exeq_elem
1193  * @rule_idx:	rule_idx
1194  * @cam_offset:	cam_offset
1195  */
1196 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1197 				       struct bnx2x_vlan_mac_obj *o,
1198 				       struct bnx2x_exeq_elem *elem,
1199 				       int rule_idx, int cam_offset)
1200 {
1201 	struct bnx2x_raw_obj *raw = &o->raw;
1202 	struct mac_configuration_cmd *config =
1203 		(struct mac_configuration_cmd *)(raw->rdata);
1204 	/* 57710 and 57711 do not support MOVE command,
1205 	 * so it's either ADD or DEL
1206 	 */
1207 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1208 		true : false;
1209 
1210 	/* Reset the ramrod data buffer */
1211 	memset(config, 0, sizeof(*config));
1212 
1213 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1214 				     cam_offset, add,
1215 				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1216 				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1217 				     ETH_VLAN_FILTER_CLASSIFY, config);
1218 }
1219 
1220 /**
1221  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1222  *
1223  * @bp:		device handle
1224  * @p:		command parameters
1225  * @ppos:	pointer to the cookie
1226  *
1227  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1228  * previously configured elements list.
1229  *
1230  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is	taken
1231  * into an account
1232  *
1233  * pointer to the cookie  - that should be given back in the next call to make
1234  * function handle the next element. If *ppos is set to NULL it will restart the
1235  * iterator. If returned *ppos == NULL this means that the last element has been
1236  * handled.
1237  *
1238  */
1239 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1240 			   struct bnx2x_vlan_mac_ramrod_params *p,
1241 			   struct bnx2x_vlan_mac_registry_elem **ppos)
1242 {
1243 	struct bnx2x_vlan_mac_registry_elem *pos;
1244 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1245 
1246 	/* If list is empty - there is nothing to do here */
1247 	if (list_empty(&o->head)) {
1248 		*ppos = NULL;
1249 		return 0;
1250 	}
1251 
1252 	/* make a step... */
1253 	if (*ppos == NULL)
1254 		*ppos = list_first_entry(&o->head,
1255 					 struct bnx2x_vlan_mac_registry_elem,
1256 					 link);
1257 	else
1258 		*ppos = list_next_entry(*ppos, link);
1259 
1260 	pos = *ppos;
1261 
1262 	/* If it's the last step - return NULL */
1263 	if (list_is_last(&pos->link, &o->head))
1264 		*ppos = NULL;
1265 
1266 	/* Prepare a 'user_req' */
1267 	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1268 
1269 	/* Set the command */
1270 	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1271 
1272 	/* Set vlan_mac_flags */
1273 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1274 
1275 	/* Set a restore bit */
1276 	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1277 
1278 	return bnx2x_config_vlan_mac(bp, p);
1279 }
1280 
1281 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1282  * pointer to an element with a specific criteria and NULL if such an element
1283  * hasn't been found.
1284  */
1285 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1286 	struct bnx2x_exe_queue_obj *o,
1287 	struct bnx2x_exeq_elem *elem)
1288 {
1289 	struct bnx2x_exeq_elem *pos;
1290 	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1291 
1292 	/* Check pending for execution commands */
1293 	list_for_each_entry(pos, &o->exe_queue, link)
1294 		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1295 			      sizeof(*data)) &&
1296 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1297 			return pos;
1298 
1299 	return NULL;
1300 }
1301 
1302 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1303 	struct bnx2x_exe_queue_obj *o,
1304 	struct bnx2x_exeq_elem *elem)
1305 {
1306 	struct bnx2x_exeq_elem *pos;
1307 	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1308 
1309 	/* Check pending for execution commands */
1310 	list_for_each_entry(pos, &o->exe_queue, link)
1311 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1312 			      sizeof(*data)) &&
1313 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1314 			return pos;
1315 
1316 	return NULL;
1317 }
1318 
1319 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1320 	struct bnx2x_exe_queue_obj *o,
1321 	struct bnx2x_exeq_elem *elem)
1322 {
1323 	struct bnx2x_exeq_elem *pos;
1324 	struct bnx2x_vlan_mac_ramrod_data *data =
1325 		&elem->cmd_data.vlan_mac.u.vlan_mac;
1326 
1327 	/* Check pending for execution commands */
1328 	list_for_each_entry(pos, &o->exe_queue, link)
1329 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1330 			      sizeof(*data)) &&
1331 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1332 			return pos;
1333 
1334 	return NULL;
1335 }
1336 
1337 /**
1338  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1339  *
1340  * @bp:		device handle
1341  * @qo:		bnx2x_qable_obj
1342  * @elem:	bnx2x_exeq_elem
1343  *
1344  * Checks that the requested configuration can be added. If yes and if
1345  * requested, consume CAM credit.
1346  *
1347  * The 'validate' is run after the 'optimize'.
1348  *
1349  */
1350 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1351 					      union bnx2x_qable_obj *qo,
1352 					      struct bnx2x_exeq_elem *elem)
1353 {
1354 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1355 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1356 	int rc;
1357 
1358 	/* Check the registry */
1359 	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1360 	if (rc) {
1361 		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1362 		return rc;
1363 	}
1364 
1365 	/* Check if there is a pending ADD command for this
1366 	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1367 	 */
1368 	if (exeq->get(exeq, elem)) {
1369 		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1370 		return -EEXIST;
1371 	}
1372 
1373 	/* TODO: Check the pending MOVE from other objects where this
1374 	 * object is a destination object.
1375 	 */
1376 
1377 	/* Consume the credit if not requested not to */
1378 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1379 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1380 	    o->get_credit(o)))
1381 		return -EINVAL;
1382 
1383 	return 0;
1384 }
1385 
1386 /**
1387  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1388  *
1389  * @bp:		device handle
1390  * @qo:		quable object to check
1391  * @elem:	element that needs to be deleted
1392  *
1393  * Checks that the requested configuration can be deleted. If yes and if
1394  * requested, returns a CAM credit.
1395  *
1396  * The 'validate' is run after the 'optimize'.
1397  */
1398 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1399 					      union bnx2x_qable_obj *qo,
1400 					      struct bnx2x_exeq_elem *elem)
1401 {
1402 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1403 	struct bnx2x_vlan_mac_registry_elem *pos;
1404 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1405 	struct bnx2x_exeq_elem query_elem;
1406 
1407 	/* If this classification can not be deleted (doesn't exist)
1408 	 * - return a BNX2X_EXIST.
1409 	 */
1410 	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1411 	if (!pos) {
1412 		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1413 		return -EEXIST;
1414 	}
1415 
1416 	/* Check if there are pending DEL or MOVE commands for this
1417 	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1418 	 */
1419 	memcpy(&query_elem, elem, sizeof(query_elem));
1420 
1421 	/* Check for MOVE commands */
1422 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1423 	if (exeq->get(exeq, &query_elem)) {
1424 		BNX2X_ERR("There is a pending MOVE command already\n");
1425 		return -EINVAL;
1426 	}
1427 
1428 	/* Check for DEL commands */
1429 	if (exeq->get(exeq, elem)) {
1430 		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1431 		return -EEXIST;
1432 	}
1433 
1434 	/* Return the credit to the credit pool if not requested not to */
1435 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1436 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1437 	    o->put_credit(o))) {
1438 		BNX2X_ERR("Failed to return a credit\n");
1439 		return -EINVAL;
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 /**
1446  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1447  *
1448  * @bp:		device handle
1449  * @qo:		quable object to check (source)
1450  * @elem:	element that needs to be moved
1451  *
1452  * Checks that the requested configuration can be moved. If yes and if
1453  * requested, returns a CAM credit.
1454  *
1455  * The 'validate' is run after the 'optimize'.
1456  */
1457 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1458 					       union bnx2x_qable_obj *qo,
1459 					       struct bnx2x_exeq_elem *elem)
1460 {
1461 	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1462 	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1463 	struct bnx2x_exeq_elem query_elem;
1464 	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1465 	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1466 
1467 	/* Check if we can perform this operation based on the current registry
1468 	 * state.
1469 	 */
1470 	if (!src_o->check_move(bp, src_o, dest_o,
1471 			       &elem->cmd_data.vlan_mac.u)) {
1472 		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1473 		return -EINVAL;
1474 	}
1475 
1476 	/* Check if there is an already pending DEL or MOVE command for the
1477 	 * source object or ADD command for a destination object. Return an
1478 	 * error if so.
1479 	 */
1480 	memcpy(&query_elem, elem, sizeof(query_elem));
1481 
1482 	/* Check DEL on source */
1483 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1484 	if (src_exeq->get(src_exeq, &query_elem)) {
1485 		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1486 		return -EINVAL;
1487 	}
1488 
1489 	/* Check MOVE on source */
1490 	if (src_exeq->get(src_exeq, elem)) {
1491 		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1492 		return -EEXIST;
1493 	}
1494 
1495 	/* Check ADD on destination */
1496 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1497 	if (dest_exeq->get(dest_exeq, &query_elem)) {
1498 		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1499 		return -EINVAL;
1500 	}
1501 
1502 	/* Consume the credit if not requested not to */
1503 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1504 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1505 	    dest_o->get_credit(dest_o)))
1506 		return -EINVAL;
1507 
1508 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1509 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1510 	    src_o->put_credit(src_o))) {
1511 		/* return the credit taken from dest... */
1512 		dest_o->put_credit(dest_o);
1513 		return -EINVAL;
1514 	}
1515 
1516 	return 0;
1517 }
1518 
1519 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1520 				   union bnx2x_qable_obj *qo,
1521 				   struct bnx2x_exeq_elem *elem)
1522 {
1523 	switch (elem->cmd_data.vlan_mac.cmd) {
1524 	case BNX2X_VLAN_MAC_ADD:
1525 		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1526 	case BNX2X_VLAN_MAC_DEL:
1527 		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1528 	case BNX2X_VLAN_MAC_MOVE:
1529 		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1530 	default:
1531 		return -EINVAL;
1532 	}
1533 }
1534 
1535 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1536 				  union bnx2x_qable_obj *qo,
1537 				  struct bnx2x_exeq_elem *elem)
1538 {
1539 	int rc = 0;
1540 
1541 	/* If consumption wasn't required, nothing to do */
1542 	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1543 		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
1544 		return 0;
1545 
1546 	switch (elem->cmd_data.vlan_mac.cmd) {
1547 	case BNX2X_VLAN_MAC_ADD:
1548 	case BNX2X_VLAN_MAC_MOVE:
1549 		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1550 		break;
1551 	case BNX2X_VLAN_MAC_DEL:
1552 		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1553 		break;
1554 	default:
1555 		return -EINVAL;
1556 	}
1557 
1558 	if (rc != true)
1559 		return -EINVAL;
1560 
1561 	return 0;
1562 }
1563 
1564 /**
1565  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1566  *
1567  * @bp:		device handle
1568  * @o:		bnx2x_vlan_mac_obj
1569  *
1570  */
1571 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1572 			       struct bnx2x_vlan_mac_obj *o)
1573 {
1574 	int cnt = 5000, rc;
1575 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1576 	struct bnx2x_raw_obj *raw = &o->raw;
1577 
1578 	while (cnt--) {
1579 		/* Wait for the current command to complete */
1580 		rc = raw->wait_comp(bp, raw);
1581 		if (rc)
1582 			return rc;
1583 
1584 		/* Wait until there are no pending commands */
1585 		if (!bnx2x_exe_queue_empty(exeq))
1586 			usleep_range(1000, 2000);
1587 		else
1588 			return 0;
1589 	}
1590 
1591 	return -EBUSY;
1592 }
1593 
1594 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1595 					 struct bnx2x_vlan_mac_obj *o,
1596 					 unsigned long *ramrod_flags)
1597 {
1598 	int rc = 0;
1599 
1600 	spin_lock_bh(&o->exe_queue.lock);
1601 
1602 	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1603 	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1604 
1605 	if (rc != 0) {
1606 		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1607 
1608 		/* Calling function should not diffrentiate between this case
1609 		 * and the case in which there is already a pending ramrod
1610 		 */
1611 		rc = 1;
1612 	} else {
1613 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1614 	}
1615 	spin_unlock_bh(&o->exe_queue.lock);
1616 
1617 	return rc;
1618 }
1619 
1620 /**
1621  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1622  *
1623  * @bp:		device handle
1624  * @o:		bnx2x_vlan_mac_obj
1625  * @cqe:
1626  * @cont:	if true schedule next execution chunk
1627  *
1628  */
1629 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1630 				   struct bnx2x_vlan_mac_obj *o,
1631 				   union event_ring_elem *cqe,
1632 				   unsigned long *ramrod_flags)
1633 {
1634 	struct bnx2x_raw_obj *r = &o->raw;
1635 	int rc;
1636 
1637 	/* Clearing the pending list & raw state should be made
1638 	 * atomically (as execution flow assumes they represent the same).
1639 	 */
1640 	spin_lock_bh(&o->exe_queue.lock);
1641 
1642 	/* Reset pending list */
1643 	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1644 
1645 	/* Clear pending */
1646 	r->clear_pending(r);
1647 
1648 	spin_unlock_bh(&o->exe_queue.lock);
1649 
1650 	/* If ramrod failed this is most likely a SW bug */
1651 	if (cqe->message.error)
1652 		return -EINVAL;
1653 
1654 	/* Run the next bulk of pending commands if requested */
1655 	if (test_bit(RAMROD_CONT, ramrod_flags)) {
1656 		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1657 
1658 		if (rc < 0)
1659 			return rc;
1660 	}
1661 
1662 	/* If there is more work to do return PENDING */
1663 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1664 		return 1;
1665 
1666 	return 0;
1667 }
1668 
1669 /**
1670  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1671  *
1672  * @bp:		device handle
1673  * @o:		bnx2x_qable_obj
1674  * @elem:	bnx2x_exeq_elem
1675  */
1676 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1677 				   union bnx2x_qable_obj *qo,
1678 				   struct bnx2x_exeq_elem *elem)
1679 {
1680 	struct bnx2x_exeq_elem query, *pos;
1681 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1682 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1683 
1684 	memcpy(&query, elem, sizeof(query));
1685 
1686 	switch (elem->cmd_data.vlan_mac.cmd) {
1687 	case BNX2X_VLAN_MAC_ADD:
1688 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1689 		break;
1690 	case BNX2X_VLAN_MAC_DEL:
1691 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1692 		break;
1693 	default:
1694 		/* Don't handle anything other than ADD or DEL */
1695 		return 0;
1696 	}
1697 
1698 	/* If we found the appropriate element - delete it */
1699 	pos = exeq->get(exeq, &query);
1700 	if (pos) {
1701 
1702 		/* Return the credit of the optimized command */
1703 		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1704 			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1705 			if ((query.cmd_data.vlan_mac.cmd ==
1706 			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1707 				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1708 				return -EINVAL;
1709 			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1710 				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1711 				return -EINVAL;
1712 			}
1713 		}
1714 
1715 		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1716 			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1717 			   "ADD" : "DEL");
1718 
1719 		list_del(&pos->link);
1720 		bnx2x_exe_queue_free_elem(bp, pos);
1721 		return 1;
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 /**
1728  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1729  *
1730  * @bp:	  device handle
1731  * @o:
1732  * @elem:
1733  * @restore:
1734  * @re:
1735  *
1736  * prepare a registry element according to the current command request.
1737  */
1738 static inline int bnx2x_vlan_mac_get_registry_elem(
1739 	struct bnx2x *bp,
1740 	struct bnx2x_vlan_mac_obj *o,
1741 	struct bnx2x_exeq_elem *elem,
1742 	bool restore,
1743 	struct bnx2x_vlan_mac_registry_elem **re)
1744 {
1745 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1746 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1747 
1748 	/* Allocate a new registry element if needed. */
1749 	if (!restore &&
1750 	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1751 		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1752 		if (!reg_elem)
1753 			return -ENOMEM;
1754 
1755 		/* Get a new CAM offset */
1756 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1757 			/* This shall never happen, because we have checked the
1758 			 * CAM availability in the 'validate'.
1759 			 */
1760 			WARN_ON(1);
1761 			kfree(reg_elem);
1762 			return -EINVAL;
1763 		}
1764 
1765 		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1766 
1767 		/* Set a VLAN-MAC data */
1768 		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1769 			  sizeof(reg_elem->u));
1770 
1771 		/* Copy the flags (needed for DEL and RESTORE flows) */
1772 		reg_elem->vlan_mac_flags =
1773 			elem->cmd_data.vlan_mac.vlan_mac_flags;
1774 	} else /* DEL, RESTORE */
1775 		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1776 
1777 	*re = reg_elem;
1778 	return 0;
1779 }
1780 
1781 /**
1782  * bnx2x_execute_vlan_mac - execute vlan mac command
1783  *
1784  * @bp:			device handle
1785  * @qo:
1786  * @exe_chunk:
1787  * @ramrod_flags:
1788  *
1789  * go and send a ramrod!
1790  */
1791 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1792 				  union bnx2x_qable_obj *qo,
1793 				  struct list_head *exe_chunk,
1794 				  unsigned long *ramrod_flags)
1795 {
1796 	struct bnx2x_exeq_elem *elem;
1797 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1798 	struct bnx2x_raw_obj *r = &o->raw;
1799 	int rc, idx = 0;
1800 	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1801 	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1802 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1803 	enum bnx2x_vlan_mac_cmd cmd;
1804 
1805 	/* If DRIVER_ONLY execution is requested, cleanup a registry
1806 	 * and exit. Otherwise send a ramrod to FW.
1807 	 */
1808 	if (!drv_only) {
1809 		WARN_ON(r->check_pending(r));
1810 
1811 		/* Set pending */
1812 		r->set_pending(r);
1813 
1814 		/* Fill the ramrod data */
1815 		list_for_each_entry(elem, exe_chunk, link) {
1816 			cmd = elem->cmd_data.vlan_mac.cmd;
1817 			/* We will add to the target object in MOVE command, so
1818 			 * change the object for a CAM search.
1819 			 */
1820 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1821 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1822 			else
1823 				cam_obj = o;
1824 
1825 			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1826 							      elem, restore,
1827 							      &reg_elem);
1828 			if (rc)
1829 				goto error_exit;
1830 
1831 			WARN_ON(!reg_elem);
1832 
1833 			/* Push a new entry into the registry */
1834 			if (!restore &&
1835 			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1836 			    (cmd == BNX2X_VLAN_MAC_MOVE)))
1837 				list_add(&reg_elem->link, &cam_obj->head);
1838 
1839 			/* Configure a single command in a ramrod data buffer */
1840 			o->set_one_rule(bp, o, elem, idx,
1841 					reg_elem->cam_offset);
1842 
1843 			/* MOVE command consumes 2 entries in the ramrod data */
1844 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1845 				idx += 2;
1846 			else
1847 				idx++;
1848 		}
1849 
1850 		/* No need for an explicit memory barrier here as long we would
1851 		 * need to ensure the ordering of writing to the SPQ element
1852 		 * and updating of the SPQ producer which involves a memory
1853 		 * read and we will have to put a full memory barrier there
1854 		 * (inside bnx2x_sp_post()).
1855 		 */
1856 
1857 		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1858 				   U64_HI(r->rdata_mapping),
1859 				   U64_LO(r->rdata_mapping),
1860 				   ETH_CONNECTION_TYPE);
1861 		if (rc)
1862 			goto error_exit;
1863 	}
1864 
1865 	/* Now, when we are done with the ramrod - clean up the registry */
1866 	list_for_each_entry(elem, exe_chunk, link) {
1867 		cmd = elem->cmd_data.vlan_mac.cmd;
1868 		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1869 		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1870 			reg_elem = o->check_del(bp, o,
1871 						&elem->cmd_data.vlan_mac.u);
1872 
1873 			WARN_ON(!reg_elem);
1874 
1875 			o->put_cam_offset(o, reg_elem->cam_offset);
1876 			list_del(&reg_elem->link);
1877 			kfree(reg_elem);
1878 		}
1879 	}
1880 
1881 	if (!drv_only)
1882 		return 1;
1883 	else
1884 		return 0;
1885 
1886 error_exit:
1887 	r->clear_pending(r);
1888 
1889 	/* Cleanup a registry in case of a failure */
1890 	list_for_each_entry(elem, exe_chunk, link) {
1891 		cmd = elem->cmd_data.vlan_mac.cmd;
1892 
1893 		if (cmd == BNX2X_VLAN_MAC_MOVE)
1894 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1895 		else
1896 			cam_obj = o;
1897 
1898 		/* Delete all newly added above entries */
1899 		if (!restore &&
1900 		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1901 		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1902 			reg_elem = o->check_del(bp, cam_obj,
1903 						&elem->cmd_data.vlan_mac.u);
1904 			if (reg_elem) {
1905 				list_del(&reg_elem->link);
1906 				kfree(reg_elem);
1907 			}
1908 		}
1909 	}
1910 
1911 	return rc;
1912 }
1913 
1914 static inline int bnx2x_vlan_mac_push_new_cmd(
1915 	struct bnx2x *bp,
1916 	struct bnx2x_vlan_mac_ramrod_params *p)
1917 {
1918 	struct bnx2x_exeq_elem *elem;
1919 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1920 	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1921 
1922 	/* Allocate the execution queue element */
1923 	elem = bnx2x_exe_queue_alloc_elem(bp);
1924 	if (!elem)
1925 		return -ENOMEM;
1926 
1927 	/* Set the command 'length' */
1928 	switch (p->user_req.cmd) {
1929 	case BNX2X_VLAN_MAC_MOVE:
1930 		elem->cmd_len = 2;
1931 		break;
1932 	default:
1933 		elem->cmd_len = 1;
1934 	}
1935 
1936 	/* Fill the object specific info */
1937 	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1938 
1939 	/* Try to add a new command to the pending list */
1940 	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1941 }
1942 
1943 /**
1944  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1945  *
1946  * @bp:	  device handle
1947  * @p:
1948  *
1949  */
1950 int bnx2x_config_vlan_mac(struct bnx2x *bp,
1951 			   struct bnx2x_vlan_mac_ramrod_params *p)
1952 {
1953 	int rc = 0;
1954 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1955 	unsigned long *ramrod_flags = &p->ramrod_flags;
1956 	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1957 	struct bnx2x_raw_obj *raw = &o->raw;
1958 
1959 	/*
1960 	 * Add new elements to the execution list for commands that require it.
1961 	 */
1962 	if (!cont) {
1963 		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1964 		if (rc)
1965 			return rc;
1966 	}
1967 
1968 	/* If nothing will be executed further in this iteration we want to
1969 	 * return PENDING if there are pending commands
1970 	 */
1971 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1972 		rc = 1;
1973 
1974 	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1975 		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1976 		raw->clear_pending(raw);
1977 	}
1978 
1979 	/* Execute commands if required */
1980 	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1981 	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1982 		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1983 						   &p->ramrod_flags);
1984 		if (rc < 0)
1985 			return rc;
1986 	}
1987 
1988 	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1989 	 * then user want to wait until the last command is done.
1990 	 */
1991 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1992 		/* Wait maximum for the current exe_queue length iterations plus
1993 		 * one (for the current pending command).
1994 		 */
1995 		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1996 
1997 		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1998 		       max_iterations--) {
1999 
2000 			/* Wait for the current command to complete */
2001 			rc = raw->wait_comp(bp, raw);
2002 			if (rc)
2003 				return rc;
2004 
2005 			/* Make a next step */
2006 			rc = __bnx2x_vlan_mac_execute_step(bp,
2007 							   p->vlan_mac_obj,
2008 							   &p->ramrod_flags);
2009 			if (rc < 0)
2010 				return rc;
2011 		}
2012 
2013 		return 0;
2014 	}
2015 
2016 	return rc;
2017 }
2018 
2019 /**
2020  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2021  *
2022  * @bp:			device handle
2023  * @o:
2024  * @vlan_mac_flags:
2025  * @ramrod_flags:	execution flags to be used for this deletion
2026  *
2027  * if the last operation has completed successfully and there are no
2028  * more elements left, positive value if the last operation has completed
2029  * successfully and there are more previously configured elements, negative
2030  * value is current operation has failed.
2031  */
2032 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2033 				  struct bnx2x_vlan_mac_obj *o,
2034 				  unsigned long *vlan_mac_flags,
2035 				  unsigned long *ramrod_flags)
2036 {
2037 	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
2038 	struct bnx2x_vlan_mac_ramrod_params p;
2039 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2040 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2041 	int read_lock;
2042 	int rc = 0;
2043 
2044 	/* Clear pending commands first */
2045 
2046 	spin_lock_bh(&exeq->lock);
2047 
2048 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
2049 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
2050 		    *vlan_mac_flags) {
2051 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
2052 			if (rc) {
2053 				BNX2X_ERR("Failed to remove command\n");
2054 				spin_unlock_bh(&exeq->lock);
2055 				return rc;
2056 			}
2057 			list_del(&exeq_pos->link);
2058 			bnx2x_exe_queue_free_elem(bp, exeq_pos);
2059 		}
2060 	}
2061 
2062 	spin_unlock_bh(&exeq->lock);
2063 
2064 	/* Prepare a command request */
2065 	memset(&p, 0, sizeof(p));
2066 	p.vlan_mac_obj = o;
2067 	p.ramrod_flags = *ramrod_flags;
2068 	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
2069 
2070 	/* Add all but the last VLAN-MAC to the execution queue without actually
2071 	 * execution anything.
2072 	 */
2073 	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
2074 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
2075 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
2076 
2077 	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2078 	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
2079 	if (read_lock != 0)
2080 		return read_lock;
2081 
2082 	list_for_each_entry(pos, &o->head, link) {
2083 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
2084 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2085 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2086 			rc = bnx2x_config_vlan_mac(bp, &p);
2087 			if (rc < 0) {
2088 				BNX2X_ERR("Failed to add a new DEL command\n");
2089 				bnx2x_vlan_mac_h_read_unlock(bp, o);
2090 				return rc;
2091 			}
2092 		}
2093 	}
2094 
2095 	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2096 	bnx2x_vlan_mac_h_read_unlock(bp, o);
2097 
2098 	p.ramrod_flags = *ramrod_flags;
2099 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
2100 
2101 	return bnx2x_config_vlan_mac(bp, &p);
2102 }
2103 
2104 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
2105 	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
2106 	unsigned long *pstate, bnx2x_obj_type type)
2107 {
2108 	raw->func_id = func_id;
2109 	raw->cid = cid;
2110 	raw->cl_id = cl_id;
2111 	raw->rdata = rdata;
2112 	raw->rdata_mapping = rdata_mapping;
2113 	raw->state = state;
2114 	raw->pstate = pstate;
2115 	raw->obj_type = type;
2116 	raw->check_pending = bnx2x_raw_check_pending;
2117 	raw->clear_pending = bnx2x_raw_clear_pending;
2118 	raw->set_pending = bnx2x_raw_set_pending;
2119 	raw->wait_comp = bnx2x_raw_wait;
2120 }
2121 
2122 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
2123 	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
2124 	int state, unsigned long *pstate, bnx2x_obj_type type,
2125 	struct bnx2x_credit_pool_obj *macs_pool,
2126 	struct bnx2x_credit_pool_obj *vlans_pool)
2127 {
2128 	INIT_LIST_HEAD(&o->head);
2129 	o->head_reader = 0;
2130 	o->head_exe_request = false;
2131 	o->saved_ramrod_flags = 0;
2132 
2133 	o->macs_pool = macs_pool;
2134 	o->vlans_pool = vlans_pool;
2135 
2136 	o->delete_all = bnx2x_vlan_mac_del_all;
2137 	o->restore = bnx2x_vlan_mac_restore;
2138 	o->complete = bnx2x_complete_vlan_mac;
2139 	o->wait = bnx2x_wait_vlan_mac;
2140 
2141 	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2142 			   state, pstate, type);
2143 }
2144 
2145 void bnx2x_init_mac_obj(struct bnx2x *bp,
2146 			struct bnx2x_vlan_mac_obj *mac_obj,
2147 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
2148 			dma_addr_t rdata_mapping, int state,
2149 			unsigned long *pstate, bnx2x_obj_type type,
2150 			struct bnx2x_credit_pool_obj *macs_pool)
2151 {
2152 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
2153 
2154 	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2155 				   rdata_mapping, state, pstate, type,
2156 				   macs_pool, NULL);
2157 
2158 	/* CAM credit pool handling */
2159 	mac_obj->get_credit = bnx2x_get_credit_mac;
2160 	mac_obj->put_credit = bnx2x_put_credit_mac;
2161 	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2162 	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2163 
2164 	if (CHIP_IS_E1x(bp)) {
2165 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
2166 		mac_obj->check_del         = bnx2x_check_mac_del;
2167 		mac_obj->check_add         = bnx2x_check_mac_add;
2168 		mac_obj->check_move        = bnx2x_check_move_always_err;
2169 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2170 
2171 		/* Exe Queue */
2172 		bnx2x_exe_queue_init(bp,
2173 				     &mac_obj->exe_queue, 1, qable_obj,
2174 				     bnx2x_validate_vlan_mac,
2175 				     bnx2x_remove_vlan_mac,
2176 				     bnx2x_optimize_vlan_mac,
2177 				     bnx2x_execute_vlan_mac,
2178 				     bnx2x_exeq_get_mac);
2179 	} else {
2180 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
2181 		mac_obj->check_del         = bnx2x_check_mac_del;
2182 		mac_obj->check_add         = bnx2x_check_mac_add;
2183 		mac_obj->check_move        = bnx2x_check_move;
2184 		mac_obj->ramrod_cmd        =
2185 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2186 		mac_obj->get_n_elements    = bnx2x_get_n_elements;
2187 
2188 		/* Exe Queue */
2189 		bnx2x_exe_queue_init(bp,
2190 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2191 				     qable_obj, bnx2x_validate_vlan_mac,
2192 				     bnx2x_remove_vlan_mac,
2193 				     bnx2x_optimize_vlan_mac,
2194 				     bnx2x_execute_vlan_mac,
2195 				     bnx2x_exeq_get_mac);
2196 	}
2197 }
2198 
2199 void bnx2x_init_vlan_obj(struct bnx2x *bp,
2200 			 struct bnx2x_vlan_mac_obj *vlan_obj,
2201 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2202 			 dma_addr_t rdata_mapping, int state,
2203 			 unsigned long *pstate, bnx2x_obj_type type,
2204 			 struct bnx2x_credit_pool_obj *vlans_pool)
2205 {
2206 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2207 
2208 	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2209 				   rdata_mapping, state, pstate, type, NULL,
2210 				   vlans_pool);
2211 
2212 	vlan_obj->get_credit = bnx2x_get_credit_vlan;
2213 	vlan_obj->put_credit = bnx2x_put_credit_vlan;
2214 	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2215 	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2216 
2217 	if (CHIP_IS_E1x(bp)) {
2218 		BNX2X_ERR("Do not support chips others than E2 and newer\n");
2219 		BUG();
2220 	} else {
2221 		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2222 		vlan_obj->check_del         = bnx2x_check_vlan_del;
2223 		vlan_obj->check_add         = bnx2x_check_vlan_add;
2224 		vlan_obj->check_move        = bnx2x_check_move;
2225 		vlan_obj->ramrod_cmd        =
2226 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2227 		vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2228 
2229 		/* Exe Queue */
2230 		bnx2x_exe_queue_init(bp,
2231 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2232 				     qable_obj, bnx2x_validate_vlan_mac,
2233 				     bnx2x_remove_vlan_mac,
2234 				     bnx2x_optimize_vlan_mac,
2235 				     bnx2x_execute_vlan_mac,
2236 				     bnx2x_exeq_get_vlan);
2237 	}
2238 }
2239 
2240 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2241 			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2242 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
2243 			     dma_addr_t rdata_mapping, int state,
2244 			     unsigned long *pstate, bnx2x_obj_type type,
2245 			     struct bnx2x_credit_pool_obj *macs_pool,
2246 			     struct bnx2x_credit_pool_obj *vlans_pool)
2247 {
2248 	union bnx2x_qable_obj *qable_obj =
2249 		(union bnx2x_qable_obj *)vlan_mac_obj;
2250 
2251 	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2252 				   rdata_mapping, state, pstate, type,
2253 				   macs_pool, vlans_pool);
2254 
2255 	/* CAM pool handling */
2256 	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2257 	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2258 	/* CAM offset is relevant for 57710 and 57711 chips only which have a
2259 	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2260 	 * will be taken from MACs' pool object only.
2261 	 */
2262 	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2263 	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2264 
2265 	if (CHIP_IS_E1(bp)) {
2266 		BNX2X_ERR("Do not support chips others than E2\n");
2267 		BUG();
2268 	} else if (CHIP_IS_E1H(bp)) {
2269 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2270 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2271 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2272 		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2273 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2274 
2275 		/* Exe Queue */
2276 		bnx2x_exe_queue_init(bp,
2277 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2278 				     bnx2x_validate_vlan_mac,
2279 				     bnx2x_remove_vlan_mac,
2280 				     bnx2x_optimize_vlan_mac,
2281 				     bnx2x_execute_vlan_mac,
2282 				     bnx2x_exeq_get_vlan_mac);
2283 	} else {
2284 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2285 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2286 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2287 		vlan_mac_obj->check_move        = bnx2x_check_move;
2288 		vlan_mac_obj->ramrod_cmd        =
2289 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2290 
2291 		/* Exe Queue */
2292 		bnx2x_exe_queue_init(bp,
2293 				     &vlan_mac_obj->exe_queue,
2294 				     CLASSIFY_RULES_COUNT,
2295 				     qable_obj, bnx2x_validate_vlan_mac,
2296 				     bnx2x_remove_vlan_mac,
2297 				     bnx2x_optimize_vlan_mac,
2298 				     bnx2x_execute_vlan_mac,
2299 				     bnx2x_exeq_get_vlan_mac);
2300 	}
2301 }
2302 
2303 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2304 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2305 			struct tstorm_eth_mac_filter_config *mac_filters,
2306 			u16 pf_id)
2307 {
2308 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2309 
2310 	u32 addr = BAR_TSTRORM_INTMEM +
2311 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2312 
2313 	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2314 }
2315 
2316 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2317 				 struct bnx2x_rx_mode_ramrod_params *p)
2318 {
2319 	/* update the bp MAC filter structure */
2320 	u32 mask = (1 << p->cl_id);
2321 
2322 	struct tstorm_eth_mac_filter_config *mac_filters =
2323 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2324 
2325 	/* initial setting is drop-all */
2326 	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2327 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2328 	u8 unmatched_unicast = 0;
2329 
2330     /* In e1x there we only take into account rx accept flag since tx switching
2331      * isn't enabled. */
2332 	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2333 		/* accept matched ucast */
2334 		drop_all_ucast = 0;
2335 
2336 	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2337 		/* accept matched mcast */
2338 		drop_all_mcast = 0;
2339 
2340 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2341 		/* accept all mcast */
2342 		drop_all_ucast = 0;
2343 		accp_all_ucast = 1;
2344 	}
2345 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2346 		/* accept all mcast */
2347 		drop_all_mcast = 0;
2348 		accp_all_mcast = 1;
2349 	}
2350 	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2351 		/* accept (all) bcast */
2352 		accp_all_bcast = 1;
2353 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2354 		/* accept unmatched unicasts */
2355 		unmatched_unicast = 1;
2356 
2357 	mac_filters->ucast_drop_all = drop_all_ucast ?
2358 		mac_filters->ucast_drop_all | mask :
2359 		mac_filters->ucast_drop_all & ~mask;
2360 
2361 	mac_filters->mcast_drop_all = drop_all_mcast ?
2362 		mac_filters->mcast_drop_all | mask :
2363 		mac_filters->mcast_drop_all & ~mask;
2364 
2365 	mac_filters->ucast_accept_all = accp_all_ucast ?
2366 		mac_filters->ucast_accept_all | mask :
2367 		mac_filters->ucast_accept_all & ~mask;
2368 
2369 	mac_filters->mcast_accept_all = accp_all_mcast ?
2370 		mac_filters->mcast_accept_all | mask :
2371 		mac_filters->mcast_accept_all & ~mask;
2372 
2373 	mac_filters->bcast_accept_all = accp_all_bcast ?
2374 		mac_filters->bcast_accept_all | mask :
2375 		mac_filters->bcast_accept_all & ~mask;
2376 
2377 	mac_filters->unmatched_unicast = unmatched_unicast ?
2378 		mac_filters->unmatched_unicast | mask :
2379 		mac_filters->unmatched_unicast & ~mask;
2380 
2381 	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2382 			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2383 	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2384 	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2385 	   mac_filters->bcast_accept_all);
2386 
2387 	/* write the MAC filter structure*/
2388 	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2389 
2390 	/* The operation is completed */
2391 	clear_bit(p->state, p->pstate);
2392 	smp_mb__after_clear_bit();
2393 
2394 	return 0;
2395 }
2396 
2397 /* Setup ramrod data */
2398 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2399 				struct eth_classify_header *hdr,
2400 				u8 rule_cnt)
2401 {
2402 	hdr->echo = cpu_to_le32(cid);
2403 	hdr->rule_cnt = rule_cnt;
2404 }
2405 
2406 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2407 				unsigned long *accept_flags,
2408 				struct eth_filter_rules_cmd *cmd,
2409 				bool clear_accept_all)
2410 {
2411 	u16 state;
2412 
2413 	/* start with 'drop-all' */
2414 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2415 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2416 
2417 	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2418 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2419 
2420 	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2421 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2422 
2423 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2424 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2425 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2426 	}
2427 
2428 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2429 		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2430 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2431 	}
2432 
2433 	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2434 		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2435 
2436 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2437 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2438 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2439 	}
2440 
2441 	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2442 		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2443 
2444 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2445 	if (clear_accept_all) {
2446 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2447 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2448 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2449 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2450 	}
2451 
2452 	cmd->state = cpu_to_le16(state);
2453 }
2454 
2455 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2456 				struct bnx2x_rx_mode_ramrod_params *p)
2457 {
2458 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2459 	int rc;
2460 	u8 rule_idx = 0;
2461 
2462 	/* Reset the ramrod data buffer */
2463 	memset(data, 0, sizeof(*data));
2464 
2465 	/* Setup ramrod data */
2466 
2467 	/* Tx (internal switching) */
2468 	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2469 		data->rules[rule_idx].client_id = p->cl_id;
2470 		data->rules[rule_idx].func_id = p->func_id;
2471 
2472 		data->rules[rule_idx].cmd_general_data =
2473 			ETH_FILTER_RULES_CMD_TX_CMD;
2474 
2475 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2476 					       &(data->rules[rule_idx++]),
2477 					       false);
2478 	}
2479 
2480 	/* Rx */
2481 	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2482 		data->rules[rule_idx].client_id = p->cl_id;
2483 		data->rules[rule_idx].func_id = p->func_id;
2484 
2485 		data->rules[rule_idx].cmd_general_data =
2486 			ETH_FILTER_RULES_CMD_RX_CMD;
2487 
2488 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2489 					       &(data->rules[rule_idx++]),
2490 					       false);
2491 	}
2492 
2493 	/* If FCoE Queue configuration has been requested configure the Rx and
2494 	 * internal switching modes for this queue in separate rules.
2495 	 *
2496 	 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2497 	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2498 	 */
2499 	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2500 		/*  Tx (internal switching) */
2501 		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2502 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2503 			data->rules[rule_idx].func_id = p->func_id;
2504 
2505 			data->rules[rule_idx].cmd_general_data =
2506 						ETH_FILTER_RULES_CMD_TX_CMD;
2507 
2508 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2509 						       &(data->rules[rule_idx]),
2510 						       true);
2511 			rule_idx++;
2512 		}
2513 
2514 		/* Rx */
2515 		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2516 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2517 			data->rules[rule_idx].func_id = p->func_id;
2518 
2519 			data->rules[rule_idx].cmd_general_data =
2520 						ETH_FILTER_RULES_CMD_RX_CMD;
2521 
2522 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2523 						       &(data->rules[rule_idx]),
2524 						       true);
2525 			rule_idx++;
2526 		}
2527 	}
2528 
2529 	/* Set the ramrod header (most importantly - number of rules to
2530 	 * configure).
2531 	 */
2532 	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2533 
2534 	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2535 			 data->header.rule_cnt, p->rx_accept_flags,
2536 			 p->tx_accept_flags);
2537 
2538 	/* No need for an explicit memory barrier here as long we would
2539 	 * need to ensure the ordering of writing to the SPQ element
2540 	 * and updating of the SPQ producer which involves a memory
2541 	 * read and we will have to put a full memory barrier there
2542 	 * (inside bnx2x_sp_post()).
2543 	 */
2544 
2545 	/* Send a ramrod */
2546 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2547 			   U64_HI(p->rdata_mapping),
2548 			   U64_LO(p->rdata_mapping),
2549 			   ETH_CONNECTION_TYPE);
2550 	if (rc)
2551 		return rc;
2552 
2553 	/* Ramrod completion is pending */
2554 	return 1;
2555 }
2556 
2557 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2558 				      struct bnx2x_rx_mode_ramrod_params *p)
2559 {
2560 	return bnx2x_state_wait(bp, p->state, p->pstate);
2561 }
2562 
2563 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2564 				    struct bnx2x_rx_mode_ramrod_params *p)
2565 {
2566 	/* Do nothing */
2567 	return 0;
2568 }
2569 
2570 int bnx2x_config_rx_mode(struct bnx2x *bp,
2571 			 struct bnx2x_rx_mode_ramrod_params *p)
2572 {
2573 	int rc;
2574 
2575 	/* Configure the new classification in the chip */
2576 	rc = p->rx_mode_obj->config_rx_mode(bp, p);
2577 	if (rc < 0)
2578 		return rc;
2579 
2580 	/* Wait for a ramrod completion if was requested */
2581 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2582 		rc = p->rx_mode_obj->wait_comp(bp, p);
2583 		if (rc)
2584 			return rc;
2585 	}
2586 
2587 	return rc;
2588 }
2589 
2590 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2591 			    struct bnx2x_rx_mode_obj *o)
2592 {
2593 	if (CHIP_IS_E1x(bp)) {
2594 		o->wait_comp      = bnx2x_empty_rx_mode_wait;
2595 		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2596 	} else {
2597 		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2598 		o->config_rx_mode = bnx2x_set_rx_mode_e2;
2599 	}
2600 }
2601 
2602 /********************* Multicast verbs: SET, CLEAR ****************************/
2603 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2604 {
2605 	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2606 }
2607 
2608 struct bnx2x_mcast_mac_elem {
2609 	struct list_head link;
2610 	u8 mac[ETH_ALEN];
2611 	u8 pad[2]; /* For a natural alignment of the following buffer */
2612 };
2613 
2614 struct bnx2x_pending_mcast_cmd {
2615 	struct list_head link;
2616 	int type; /* BNX2X_MCAST_CMD_X */
2617 	union {
2618 		struct list_head macs_head;
2619 		u32 macs_num; /* Needed for DEL command */
2620 		int next_bin; /* Needed for RESTORE flow with aprox match */
2621 	} data;
2622 
2623 	bool done; /* set to true, when the command has been handled,
2624 		    * practically used in 57712 handling only, where one pending
2625 		    * command may be handled in a few operations. As long as for
2626 		    * other chips every operation handling is completed in a
2627 		    * single ramrod, there is no need to utilize this field.
2628 		    */
2629 };
2630 
2631 static int bnx2x_mcast_wait(struct bnx2x *bp,
2632 			    struct bnx2x_mcast_obj *o)
2633 {
2634 	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2635 			o->raw.wait_comp(bp, &o->raw))
2636 		return -EBUSY;
2637 
2638 	return 0;
2639 }
2640 
2641 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2642 				   struct bnx2x_mcast_obj *o,
2643 				   struct bnx2x_mcast_ramrod_params *p,
2644 				   enum bnx2x_mcast_cmd cmd)
2645 {
2646 	int total_sz;
2647 	struct bnx2x_pending_mcast_cmd *new_cmd;
2648 	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2649 	struct bnx2x_mcast_list_elem *pos;
2650 	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2651 			     p->mcast_list_len : 0);
2652 
2653 	/* If the command is empty ("handle pending commands only"), break */
2654 	if (!p->mcast_list_len)
2655 		return 0;
2656 
2657 	total_sz = sizeof(*new_cmd) +
2658 		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2659 
2660 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2661 	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2662 
2663 	if (!new_cmd)
2664 		return -ENOMEM;
2665 
2666 	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2667 	   cmd, macs_list_len);
2668 
2669 	INIT_LIST_HEAD(&new_cmd->data.macs_head);
2670 
2671 	new_cmd->type = cmd;
2672 	new_cmd->done = false;
2673 
2674 	switch (cmd) {
2675 	case BNX2X_MCAST_CMD_ADD:
2676 		cur_mac = (struct bnx2x_mcast_mac_elem *)
2677 			  ((u8 *)new_cmd + sizeof(*new_cmd));
2678 
2679 		/* Push the MACs of the current command into the pending command
2680 		 * MACs list: FIFO
2681 		 */
2682 		list_for_each_entry(pos, &p->mcast_list, link) {
2683 			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2684 			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2685 			cur_mac++;
2686 		}
2687 
2688 		break;
2689 
2690 	case BNX2X_MCAST_CMD_DEL:
2691 		new_cmd->data.macs_num = p->mcast_list_len;
2692 		break;
2693 
2694 	case BNX2X_MCAST_CMD_RESTORE:
2695 		new_cmd->data.next_bin = 0;
2696 		break;
2697 
2698 	default:
2699 		kfree(new_cmd);
2700 		BNX2X_ERR("Unknown command: %d\n", cmd);
2701 		return -EINVAL;
2702 	}
2703 
2704 	/* Push the new pending command to the tail of the pending list: FIFO */
2705 	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2706 
2707 	o->set_sched(o);
2708 
2709 	return 1;
2710 }
2711 
2712 /**
2713  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2714  *
2715  * @o:
2716  * @last:	index to start looking from (including)
2717  *
2718  * returns the next found (set) bin or a negative value if none is found.
2719  */
2720 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2721 {
2722 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2723 
2724 	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2725 		if (o->registry.aprox_match.vec[i])
2726 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2727 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2728 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2729 						       vec, cur_bit)) {
2730 					return cur_bit;
2731 				}
2732 			}
2733 		inner_start = 0;
2734 	}
2735 
2736 	/* None found */
2737 	return -1;
2738 }
2739 
2740 /**
2741  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2742  *
2743  * @o:
2744  *
2745  * returns the index of the found bin or -1 if none is found
2746  */
2747 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2748 {
2749 	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2750 
2751 	if (cur_bit >= 0)
2752 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2753 
2754 	return cur_bit;
2755 }
2756 
2757 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2758 {
2759 	struct bnx2x_raw_obj *raw = &o->raw;
2760 	u8 rx_tx_flag = 0;
2761 
2762 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2763 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2764 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2765 
2766 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2767 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2768 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2769 
2770 	return rx_tx_flag;
2771 }
2772 
2773 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2774 					struct bnx2x_mcast_obj *o, int idx,
2775 					union bnx2x_mcast_config_data *cfg_data,
2776 					enum bnx2x_mcast_cmd cmd)
2777 {
2778 	struct bnx2x_raw_obj *r = &o->raw;
2779 	struct eth_multicast_rules_ramrod_data *data =
2780 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2781 	u8 func_id = r->func_id;
2782 	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2783 	int bin;
2784 
2785 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2786 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2787 
2788 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2789 
2790 	/* Get a bin and update a bins' vector */
2791 	switch (cmd) {
2792 	case BNX2X_MCAST_CMD_ADD:
2793 		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2794 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2795 		break;
2796 
2797 	case BNX2X_MCAST_CMD_DEL:
2798 		/* If there were no more bins to clear
2799 		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2800 		 * clear any (0xff) bin.
2801 		 * See bnx2x_mcast_validate_e2() for explanation when it may
2802 		 * happen.
2803 		 */
2804 		bin = bnx2x_mcast_clear_first_bin(o);
2805 		break;
2806 
2807 	case BNX2X_MCAST_CMD_RESTORE:
2808 		bin = cfg_data->bin;
2809 		break;
2810 
2811 	default:
2812 		BNX2X_ERR("Unknown command: %d\n", cmd);
2813 		return;
2814 	}
2815 
2816 	DP(BNX2X_MSG_SP, "%s bin %d\n",
2817 			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2818 			 "Setting"  : "Clearing"), bin);
2819 
2820 	data->rules[idx].bin_id    = (u8)bin;
2821 	data->rules[idx].func_id   = func_id;
2822 	data->rules[idx].engine_id = o->engine_id;
2823 }
2824 
2825 /**
2826  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2827  *
2828  * @bp:		device handle
2829  * @o:
2830  * @start_bin:	index in the registry to start from (including)
2831  * @rdata_idx:	index in the ramrod data to start from
2832  *
2833  * returns last handled bin index or -1 if all bins have been handled
2834  */
2835 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2836 	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2837 	int *rdata_idx)
2838 {
2839 	int cur_bin, cnt = *rdata_idx;
2840 	union bnx2x_mcast_config_data cfg_data = {NULL};
2841 
2842 	/* go through the registry and configure the bins from it */
2843 	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2844 	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2845 
2846 		cfg_data.bin = (u8)cur_bin;
2847 		o->set_one_rule(bp, o, cnt, &cfg_data,
2848 				BNX2X_MCAST_CMD_RESTORE);
2849 
2850 		cnt++;
2851 
2852 		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2853 
2854 		/* Break if we reached the maximum number
2855 		 * of rules.
2856 		 */
2857 		if (cnt >= o->max_cmd_len)
2858 			break;
2859 	}
2860 
2861 	*rdata_idx = cnt;
2862 
2863 	return cur_bin;
2864 }
2865 
2866 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2867 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2868 	int *line_idx)
2869 {
2870 	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2871 	int cnt = *line_idx;
2872 	union bnx2x_mcast_config_data cfg_data = {NULL};
2873 
2874 	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2875 				 link) {
2876 
2877 		cfg_data.mac = &pmac_pos->mac[0];
2878 		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2879 
2880 		cnt++;
2881 
2882 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2883 		   pmac_pos->mac);
2884 
2885 		list_del(&pmac_pos->link);
2886 
2887 		/* Break if we reached the maximum number
2888 		 * of rules.
2889 		 */
2890 		if (cnt >= o->max_cmd_len)
2891 			break;
2892 	}
2893 
2894 	*line_idx = cnt;
2895 
2896 	/* if no more MACs to configure - we are done */
2897 	if (list_empty(&cmd_pos->data.macs_head))
2898 		cmd_pos->done = true;
2899 }
2900 
2901 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2902 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2903 	int *line_idx)
2904 {
2905 	int cnt = *line_idx;
2906 
2907 	while (cmd_pos->data.macs_num) {
2908 		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2909 
2910 		cnt++;
2911 
2912 		cmd_pos->data.macs_num--;
2913 
2914 		  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2915 				   cmd_pos->data.macs_num, cnt);
2916 
2917 		/* Break if we reached the maximum
2918 		 * number of rules.
2919 		 */
2920 		if (cnt >= o->max_cmd_len)
2921 			break;
2922 	}
2923 
2924 	*line_idx = cnt;
2925 
2926 	/* If we cleared all bins - we are done */
2927 	if (!cmd_pos->data.macs_num)
2928 		cmd_pos->done = true;
2929 }
2930 
2931 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2932 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2933 	int *line_idx)
2934 {
2935 	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2936 						line_idx);
2937 
2938 	if (cmd_pos->data.next_bin < 0)
2939 		/* If o->set_restore returned -1 we are done */
2940 		cmd_pos->done = true;
2941 	else
2942 		/* Start from the next bin next time */
2943 		cmd_pos->data.next_bin++;
2944 }
2945 
2946 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2947 				struct bnx2x_mcast_ramrod_params *p)
2948 {
2949 	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2950 	int cnt = 0;
2951 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2952 
2953 	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2954 				 link) {
2955 		switch (cmd_pos->type) {
2956 		case BNX2X_MCAST_CMD_ADD:
2957 			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2958 			break;
2959 
2960 		case BNX2X_MCAST_CMD_DEL:
2961 			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2962 			break;
2963 
2964 		case BNX2X_MCAST_CMD_RESTORE:
2965 			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2966 							   &cnt);
2967 			break;
2968 
2969 		default:
2970 			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2971 			return -EINVAL;
2972 		}
2973 
2974 		/* If the command has been completed - remove it from the list
2975 		 * and free the memory
2976 		 */
2977 		if (cmd_pos->done) {
2978 			list_del(&cmd_pos->link);
2979 			kfree(cmd_pos);
2980 		}
2981 
2982 		/* Break if we reached the maximum number of rules */
2983 		if (cnt >= o->max_cmd_len)
2984 			break;
2985 	}
2986 
2987 	return cnt;
2988 }
2989 
2990 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2991 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2992 	int *line_idx)
2993 {
2994 	struct bnx2x_mcast_list_elem *mlist_pos;
2995 	union bnx2x_mcast_config_data cfg_data = {NULL};
2996 	int cnt = *line_idx;
2997 
2998 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2999 		cfg_data.mac = mlist_pos->mac;
3000 		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3001 
3002 		cnt++;
3003 
3004 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3005 		   mlist_pos->mac);
3006 	}
3007 
3008 	*line_idx = cnt;
3009 }
3010 
3011 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
3012 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3013 	int *line_idx)
3014 {
3015 	int cnt = *line_idx, i;
3016 
3017 	for (i = 0; i < p->mcast_list_len; i++) {
3018 		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3019 
3020 		cnt++;
3021 
3022 		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
3023 				 p->mcast_list_len - i - 1);
3024 	}
3025 
3026 	*line_idx = cnt;
3027 }
3028 
3029 /**
3030  * bnx2x_mcast_handle_current_cmd -
3031  *
3032  * @bp:		device handle
3033  * @p:
3034  * @cmd:
3035  * @start_cnt:	first line in the ramrod data that may be used
3036  *
3037  * This function is called iff there is enough place for the current command in
3038  * the ramrod data.
3039  * Returns number of lines filled in the ramrod data in total.
3040  */
3041 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
3042 			struct bnx2x_mcast_ramrod_params *p,
3043 			enum bnx2x_mcast_cmd cmd,
3044 			int start_cnt)
3045 {
3046 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3047 	int cnt = start_cnt;
3048 
3049 	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3050 
3051 	switch (cmd) {
3052 	case BNX2X_MCAST_CMD_ADD:
3053 		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3054 		break;
3055 
3056 	case BNX2X_MCAST_CMD_DEL:
3057 		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3058 		break;
3059 
3060 	case BNX2X_MCAST_CMD_RESTORE:
3061 		o->hdl_restore(bp, o, 0, &cnt);
3062 		break;
3063 
3064 	default:
3065 		BNX2X_ERR("Unknown command: %d\n", cmd);
3066 		return -EINVAL;
3067 	}
3068 
3069 	/* The current command has been handled */
3070 	p->mcast_list_len = 0;
3071 
3072 	return cnt;
3073 }
3074 
3075 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
3076 				   struct bnx2x_mcast_ramrod_params *p,
3077 				   enum bnx2x_mcast_cmd cmd)
3078 {
3079 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3080 	int reg_sz = o->get_registry_size(o);
3081 
3082 	switch (cmd) {
3083 	/* DEL command deletes all currently configured MACs */
3084 	case BNX2X_MCAST_CMD_DEL:
3085 		o->set_registry_size(o, 0);
3086 		/* Don't break */
3087 
3088 	/* RESTORE command will restore the entire multicast configuration */
3089 	case BNX2X_MCAST_CMD_RESTORE:
3090 		/* Here we set the approximate amount of work to do, which in
3091 		 * fact may be only less as some MACs in postponed ADD
3092 		 * command(s) scheduled before this command may fall into
3093 		 * the same bin and the actual number of bins set in the
3094 		 * registry would be less than we estimated here. See
3095 		 * bnx2x_mcast_set_one_rule_e2() for further details.
3096 		 */
3097 		p->mcast_list_len = reg_sz;
3098 		break;
3099 
3100 	case BNX2X_MCAST_CMD_ADD:
3101 	case BNX2X_MCAST_CMD_CONT:
3102 		/* Here we assume that all new MACs will fall into new bins.
3103 		 * However we will correct the real registry size after we
3104 		 * handle all pending commands.
3105 		 */
3106 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
3107 		break;
3108 
3109 	default:
3110 		BNX2X_ERR("Unknown command: %d\n", cmd);
3111 		return -EINVAL;
3112 	}
3113 
3114 	/* Increase the total number of MACs pending to be configured */
3115 	o->total_pending_num += p->mcast_list_len;
3116 
3117 	return 0;
3118 }
3119 
3120 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
3121 				      struct bnx2x_mcast_ramrod_params *p,
3122 				      int old_num_bins)
3123 {
3124 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3125 
3126 	o->set_registry_size(o, old_num_bins);
3127 	o->total_pending_num -= p->mcast_list_len;
3128 }
3129 
3130 /**
3131  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
3132  *
3133  * @bp:		device handle
3134  * @p:
3135  * @len:	number of rules to handle
3136  */
3137 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
3138 					struct bnx2x_mcast_ramrod_params *p,
3139 					u8 len)
3140 {
3141 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3142 	struct eth_multicast_rules_ramrod_data *data =
3143 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3144 
3145 	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3146 					(BNX2X_FILTER_MCAST_PENDING <<
3147 					 BNX2X_SWCID_SHIFT));
3148 	data->header.rule_cnt = len;
3149 }
3150 
3151 /**
3152  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3153  *
3154  * @bp:		device handle
3155  * @o:
3156  *
3157  * Recalculate the actual number of set bins in the registry using Brian
3158  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
3159  *
3160  * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
3161  */
3162 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
3163 						  struct bnx2x_mcast_obj *o)
3164 {
3165 	int i, cnt = 0;
3166 	u64 elem;
3167 
3168 	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
3169 		elem = o->registry.aprox_match.vec[i];
3170 		for (; elem; cnt++)
3171 			elem &= elem - 1;
3172 	}
3173 
3174 	o->set_registry_size(o, cnt);
3175 
3176 	return 0;
3177 }
3178 
3179 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
3180 				struct bnx2x_mcast_ramrod_params *p,
3181 				enum bnx2x_mcast_cmd cmd)
3182 {
3183 	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
3184 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3185 	struct eth_multicast_rules_ramrod_data *data =
3186 		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3187 	int cnt = 0, rc;
3188 
3189 	/* Reset the ramrod data buffer */
3190 	memset(data, 0, sizeof(*data));
3191 
3192 	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
3193 
3194 	/* If there are no more pending commands - clear SCHEDULED state */
3195 	if (list_empty(&o->pending_cmds_head))
3196 		o->clear_sched(o);
3197 
3198 	/* The below may be true iff there was enough room in ramrod
3199 	 * data for all pending commands and for the current
3200 	 * command. Otherwise the current command would have been added
3201 	 * to the pending commands and p->mcast_list_len would have been
3202 	 * zeroed.
3203 	 */
3204 	if (p->mcast_list_len > 0)
3205 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3206 
3207 	/* We've pulled out some MACs - update the total number of
3208 	 * outstanding.
3209 	 */
3210 	o->total_pending_num -= cnt;
3211 
3212 	/* send a ramrod */
3213 	WARN_ON(o->total_pending_num < 0);
3214 	WARN_ON(cnt > o->max_cmd_len);
3215 
3216 	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3217 
3218 	/* Update a registry size if there are no more pending operations.
3219 	 *
3220 	 * We don't want to change the value of the registry size if there are
3221 	 * pending operations because we want it to always be equal to the
3222 	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3223 	 * set bins after the last requested operation in order to properly
3224 	 * evaluate the size of the next DEL/RESTORE operation.
3225 	 *
3226 	 * Note that we update the registry itself during command(s) handling
3227 	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3228 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3229 	 * with a limited amount of update commands (per MAC/bin) and we don't
3230 	 * know in this scope what the actual state of bins configuration is
3231 	 * going to be after this ramrod.
3232 	 */
3233 	if (!o->total_pending_num)
3234 		bnx2x_mcast_refresh_registry_e2(bp, o);
3235 
3236 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3237 	 * RAMROD_PENDING status immediately.
3238 	 */
3239 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3240 		raw->clear_pending(raw);
3241 		return 0;
3242 	} else {
3243 		/* No need for an explicit memory barrier here as long we would
3244 		 * need to ensure the ordering of writing to the SPQ element
3245 		 * and updating of the SPQ producer which involves a memory
3246 		 * read and we will have to put a full memory barrier there
3247 		 * (inside bnx2x_sp_post()).
3248 		 */
3249 
3250 		/* Send a ramrod */
3251 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3252 				   raw->cid, U64_HI(raw->rdata_mapping),
3253 				   U64_LO(raw->rdata_mapping),
3254 				   ETH_CONNECTION_TYPE);
3255 		if (rc)
3256 			return rc;
3257 
3258 		/* Ramrod completion is pending */
3259 		return 1;
3260 	}
3261 }
3262 
3263 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3264 				    struct bnx2x_mcast_ramrod_params *p,
3265 				    enum bnx2x_mcast_cmd cmd)
3266 {
3267 	/* Mark, that there is a work to do */
3268 	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3269 		p->mcast_list_len = 1;
3270 
3271 	return 0;
3272 }
3273 
3274 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3275 				       struct bnx2x_mcast_ramrod_params *p,
3276 				       int old_num_bins)
3277 {
3278 	/* Do nothing */
3279 }
3280 
3281 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3282 do { \
3283 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3284 } while (0)
3285 
3286 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3287 					   struct bnx2x_mcast_obj *o,
3288 					   struct bnx2x_mcast_ramrod_params *p,
3289 					   u32 *mc_filter)
3290 {
3291 	struct bnx2x_mcast_list_elem *mlist_pos;
3292 	int bit;
3293 
3294 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3295 		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3296 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3297 
3298 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3299 		   mlist_pos->mac, bit);
3300 
3301 		/* bookkeeping... */
3302 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3303 				  bit);
3304 	}
3305 }
3306 
3307 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3308 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3309 	u32 *mc_filter)
3310 {
3311 	int bit;
3312 
3313 	for (bit = bnx2x_mcast_get_next_bin(o, 0);
3314 	     bit >= 0;
3315 	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3316 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3317 		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3318 	}
3319 }
3320 
3321 /* On 57711 we write the multicast MACs' approximate match
3322  * table by directly into the TSTORM's internal RAM. So we don't
3323  * really need to handle any tricks to make it work.
3324  */
3325 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3326 				 struct bnx2x_mcast_ramrod_params *p,
3327 				 enum bnx2x_mcast_cmd cmd)
3328 {
3329 	int i;
3330 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3331 	struct bnx2x_raw_obj *r = &o->raw;
3332 
3333 	/* If CLEAR_ONLY has been requested - clear the registry
3334 	 * and clear a pending bit.
3335 	 */
3336 	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3337 		u32 mc_filter[MC_HASH_SIZE] = {0};
3338 
3339 		/* Set the multicast filter bits before writing it into
3340 		 * the internal memory.
3341 		 */
3342 		switch (cmd) {
3343 		case BNX2X_MCAST_CMD_ADD:
3344 			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3345 			break;
3346 
3347 		case BNX2X_MCAST_CMD_DEL:
3348 			DP(BNX2X_MSG_SP,
3349 			   "Invalidating multicast MACs configuration\n");
3350 
3351 			/* clear the registry */
3352 			memset(o->registry.aprox_match.vec, 0,
3353 			       sizeof(o->registry.aprox_match.vec));
3354 			break;
3355 
3356 		case BNX2X_MCAST_CMD_RESTORE:
3357 			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3358 			break;
3359 
3360 		default:
3361 			BNX2X_ERR("Unknown command: %d\n", cmd);
3362 			return -EINVAL;
3363 		}
3364 
3365 		/* Set the mcast filter in the internal memory */
3366 		for (i = 0; i < MC_HASH_SIZE; i++)
3367 			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3368 	} else
3369 		/* clear the registry */
3370 		memset(o->registry.aprox_match.vec, 0,
3371 		       sizeof(o->registry.aprox_match.vec));
3372 
3373 	/* We are done */
3374 	r->clear_pending(r);
3375 
3376 	return 0;
3377 }
3378 
3379 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3380 				   struct bnx2x_mcast_ramrod_params *p,
3381 				   enum bnx2x_mcast_cmd cmd)
3382 {
3383 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3384 	int reg_sz = o->get_registry_size(o);
3385 
3386 	switch (cmd) {
3387 	/* DEL command deletes all currently configured MACs */
3388 	case BNX2X_MCAST_CMD_DEL:
3389 		o->set_registry_size(o, 0);
3390 		/* Don't break */
3391 
3392 	/* RESTORE command will restore the entire multicast configuration */
3393 	case BNX2X_MCAST_CMD_RESTORE:
3394 		p->mcast_list_len = reg_sz;
3395 		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3396 				   cmd, p->mcast_list_len);
3397 		break;
3398 
3399 	case BNX2X_MCAST_CMD_ADD:
3400 	case BNX2X_MCAST_CMD_CONT:
3401 		/* Multicast MACs on 57710 are configured as unicast MACs and
3402 		 * there is only a limited number of CAM entries for that
3403 		 * matter.
3404 		 */
3405 		if (p->mcast_list_len > o->max_cmd_len) {
3406 			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3407 				  o->max_cmd_len);
3408 			return -EINVAL;
3409 		}
3410 		/* Every configured MAC should be cleared if DEL command is
3411 		 * called. Only the last ADD command is relevant as long as
3412 		 * every ADD commands overrides the previous configuration.
3413 		 */
3414 		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3415 		if (p->mcast_list_len > 0)
3416 			o->set_registry_size(o, p->mcast_list_len);
3417 
3418 		break;
3419 
3420 	default:
3421 		BNX2X_ERR("Unknown command: %d\n", cmd);
3422 		return -EINVAL;
3423 	}
3424 
3425 	/* We want to ensure that commands are executed one by one for 57710.
3426 	 * Therefore each none-empty command will consume o->max_cmd_len.
3427 	 */
3428 	if (p->mcast_list_len)
3429 		o->total_pending_num += o->max_cmd_len;
3430 
3431 	return 0;
3432 }
3433 
3434 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3435 				      struct bnx2x_mcast_ramrod_params *p,
3436 				      int old_num_macs)
3437 {
3438 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3439 
3440 	o->set_registry_size(o, old_num_macs);
3441 
3442 	/* If current command hasn't been handled yet and we are
3443 	 * here means that it's meant to be dropped and we have to
3444 	 * update the number of outstanding MACs accordingly.
3445 	 */
3446 	if (p->mcast_list_len)
3447 		o->total_pending_num -= o->max_cmd_len;
3448 }
3449 
3450 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3451 					struct bnx2x_mcast_obj *o, int idx,
3452 					union bnx2x_mcast_config_data *cfg_data,
3453 					enum bnx2x_mcast_cmd cmd)
3454 {
3455 	struct bnx2x_raw_obj *r = &o->raw;
3456 	struct mac_configuration_cmd *data =
3457 		(struct mac_configuration_cmd *)(r->rdata);
3458 
3459 	/* copy mac */
3460 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3461 		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3462 				      &data->config_table[idx].middle_mac_addr,
3463 				      &data->config_table[idx].lsb_mac_addr,
3464 				      cfg_data->mac);
3465 
3466 		data->config_table[idx].vlan_id = 0;
3467 		data->config_table[idx].pf_id = r->func_id;
3468 		data->config_table[idx].clients_bit_vector =
3469 			cpu_to_le32(1 << r->cl_id);
3470 
3471 		SET_FLAG(data->config_table[idx].flags,
3472 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3473 			 T_ETH_MAC_COMMAND_SET);
3474 	}
3475 }
3476 
3477 /**
3478  * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3479  *
3480  * @bp:		device handle
3481  * @p:
3482  * @len:	number of rules to handle
3483  */
3484 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3485 					struct bnx2x_mcast_ramrod_params *p,
3486 					u8 len)
3487 {
3488 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3489 	struct mac_configuration_cmd *data =
3490 		(struct mac_configuration_cmd *)(r->rdata);
3491 
3492 	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3493 		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3494 		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3495 
3496 	data->hdr.offset = offset;
3497 	data->hdr.client_id = cpu_to_le16(0xff);
3498 	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3499 				     (BNX2X_FILTER_MCAST_PENDING <<
3500 				      BNX2X_SWCID_SHIFT));
3501 	data->hdr.length = len;
3502 }
3503 
3504 /**
3505  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3506  *
3507  * @bp:		device handle
3508  * @o:
3509  * @start_idx:	index in the registry to start from
3510  * @rdata_idx:	index in the ramrod data to start from
3511  *
3512  * restore command for 57710 is like all other commands - always a stand alone
3513  * command - start_idx and rdata_idx will always be 0. This function will always
3514  * succeed.
3515  * returns -1 to comply with 57712 variant.
3516  */
3517 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3518 	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3519 	int *rdata_idx)
3520 {
3521 	struct bnx2x_mcast_mac_elem *elem;
3522 	int i = 0;
3523 	union bnx2x_mcast_config_data cfg_data = {NULL};
3524 
3525 	/* go through the registry and configure the MACs from it. */
3526 	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3527 		cfg_data.mac = &elem->mac[0];
3528 		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3529 
3530 		i++;
3531 
3532 		  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3533 		     cfg_data.mac);
3534 	}
3535 
3536 	*rdata_idx = i;
3537 
3538 	return -1;
3539 }
3540 
3541 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3542 	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3543 {
3544 	struct bnx2x_pending_mcast_cmd *cmd_pos;
3545 	struct bnx2x_mcast_mac_elem *pmac_pos;
3546 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3547 	union bnx2x_mcast_config_data cfg_data = {NULL};
3548 	int cnt = 0;
3549 
3550 	/* If nothing to be done - return */
3551 	if (list_empty(&o->pending_cmds_head))
3552 		return 0;
3553 
3554 	/* Handle the first command */
3555 	cmd_pos = list_first_entry(&o->pending_cmds_head,
3556 				   struct bnx2x_pending_mcast_cmd, link);
3557 
3558 	switch (cmd_pos->type) {
3559 	case BNX2X_MCAST_CMD_ADD:
3560 		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3561 			cfg_data.mac = &pmac_pos->mac[0];
3562 			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3563 
3564 			cnt++;
3565 
3566 			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3567 			   pmac_pos->mac);
3568 		}
3569 		break;
3570 
3571 	case BNX2X_MCAST_CMD_DEL:
3572 		cnt = cmd_pos->data.macs_num;
3573 		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3574 		break;
3575 
3576 	case BNX2X_MCAST_CMD_RESTORE:
3577 		o->hdl_restore(bp, o, 0, &cnt);
3578 		break;
3579 
3580 	default:
3581 		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3582 		return -EINVAL;
3583 	}
3584 
3585 	list_del(&cmd_pos->link);
3586 	kfree(cmd_pos);
3587 
3588 	return cnt;
3589 }
3590 
3591 /**
3592  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3593  *
3594  * @fw_hi:
3595  * @fw_mid:
3596  * @fw_lo:
3597  * @mac:
3598  */
3599 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3600 					 __le16 *fw_lo, u8 *mac)
3601 {
3602 	mac[1] = ((u8 *)fw_hi)[0];
3603 	mac[0] = ((u8 *)fw_hi)[1];
3604 	mac[3] = ((u8 *)fw_mid)[0];
3605 	mac[2] = ((u8 *)fw_mid)[1];
3606 	mac[5] = ((u8 *)fw_lo)[0];
3607 	mac[4] = ((u8 *)fw_lo)[1];
3608 }
3609 
3610 /**
3611  * bnx2x_mcast_refresh_registry_e1 -
3612  *
3613  * @bp:		device handle
3614  * @cnt:
3615  *
3616  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3617  * and update the registry correspondingly: if ADD - allocate a memory and add
3618  * the entries to the registry (list), if DELETE - clear the registry and free
3619  * the memory.
3620  */
3621 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3622 						  struct bnx2x_mcast_obj *o)
3623 {
3624 	struct bnx2x_raw_obj *raw = &o->raw;
3625 	struct bnx2x_mcast_mac_elem *elem;
3626 	struct mac_configuration_cmd *data =
3627 			(struct mac_configuration_cmd *)(raw->rdata);
3628 
3629 	/* If first entry contains a SET bit - the command was ADD,
3630 	 * otherwise - DEL_ALL
3631 	 */
3632 	if (GET_FLAG(data->config_table[0].flags,
3633 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3634 		int i, len = data->hdr.length;
3635 
3636 		/* Break if it was a RESTORE command */
3637 		if (!list_empty(&o->registry.exact_match.macs))
3638 			return 0;
3639 
3640 		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3641 		if (!elem) {
3642 			BNX2X_ERR("Failed to allocate registry memory\n");
3643 			return -ENOMEM;
3644 		}
3645 
3646 		for (i = 0; i < len; i++, elem++) {
3647 			bnx2x_get_fw_mac_addr(
3648 				&data->config_table[i].msb_mac_addr,
3649 				&data->config_table[i].middle_mac_addr,
3650 				&data->config_table[i].lsb_mac_addr,
3651 				elem->mac);
3652 			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3653 			   elem->mac);
3654 			list_add_tail(&elem->link,
3655 				      &o->registry.exact_match.macs);
3656 		}
3657 	} else {
3658 		elem = list_first_entry(&o->registry.exact_match.macs,
3659 					struct bnx2x_mcast_mac_elem, link);
3660 		DP(BNX2X_MSG_SP, "Deleting a registry\n");
3661 		kfree(elem);
3662 		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3663 	}
3664 
3665 	return 0;
3666 }
3667 
3668 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3669 				struct bnx2x_mcast_ramrod_params *p,
3670 				enum bnx2x_mcast_cmd cmd)
3671 {
3672 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3673 	struct bnx2x_raw_obj *raw = &o->raw;
3674 	struct mac_configuration_cmd *data =
3675 		(struct mac_configuration_cmd *)(raw->rdata);
3676 	int cnt = 0, i, rc;
3677 
3678 	/* Reset the ramrod data buffer */
3679 	memset(data, 0, sizeof(*data));
3680 
3681 	/* First set all entries as invalid */
3682 	for (i = 0; i < o->max_cmd_len ; i++)
3683 		SET_FLAG(data->config_table[i].flags,
3684 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3685 			 T_ETH_MAC_COMMAND_INVALIDATE);
3686 
3687 	/* Handle pending commands first */
3688 	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3689 
3690 	/* If there are no more pending commands - clear SCHEDULED state */
3691 	if (list_empty(&o->pending_cmds_head))
3692 		o->clear_sched(o);
3693 
3694 	/* The below may be true iff there were no pending commands */
3695 	if (!cnt)
3696 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3697 
3698 	/* For 57710 every command has o->max_cmd_len length to ensure that
3699 	 * commands are done one at a time.
3700 	 */
3701 	o->total_pending_num -= o->max_cmd_len;
3702 
3703 	/* send a ramrod */
3704 
3705 	WARN_ON(cnt > o->max_cmd_len);
3706 
3707 	/* Set ramrod header (in particular, a number of entries to update) */
3708 	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3709 
3710 	/* update a registry: we need the registry contents to be always up
3711 	 * to date in order to be able to execute a RESTORE opcode. Here
3712 	 * we use the fact that for 57710 we sent one command at a time
3713 	 * hence we may take the registry update out of the command handling
3714 	 * and do it in a simpler way here.
3715 	 */
3716 	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3717 	if (rc)
3718 		return rc;
3719 
3720 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3721 	 * RAMROD_PENDING status immediately.
3722 	 */
3723 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3724 		raw->clear_pending(raw);
3725 		return 0;
3726 	} else {
3727 		/* No need for an explicit memory barrier here as long we would
3728 		 * need to ensure the ordering of writing to the SPQ element
3729 		 * and updating of the SPQ producer which involves a memory
3730 		 * read and we will have to put a full memory barrier there
3731 		 * (inside bnx2x_sp_post()).
3732 		 */
3733 
3734 		/* Send a ramrod */
3735 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3736 				   U64_HI(raw->rdata_mapping),
3737 				   U64_LO(raw->rdata_mapping),
3738 				   ETH_CONNECTION_TYPE);
3739 		if (rc)
3740 			return rc;
3741 
3742 		/* Ramrod completion is pending */
3743 		return 1;
3744 	}
3745 }
3746 
3747 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3748 {
3749 	return o->registry.exact_match.num_macs_set;
3750 }
3751 
3752 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3753 {
3754 	return o->registry.aprox_match.num_bins_set;
3755 }
3756 
3757 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3758 						int n)
3759 {
3760 	o->registry.exact_match.num_macs_set = n;
3761 }
3762 
3763 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3764 						int n)
3765 {
3766 	o->registry.aprox_match.num_bins_set = n;
3767 }
3768 
3769 int bnx2x_config_mcast(struct bnx2x *bp,
3770 		       struct bnx2x_mcast_ramrod_params *p,
3771 		       enum bnx2x_mcast_cmd cmd)
3772 {
3773 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3774 	struct bnx2x_raw_obj *r = &o->raw;
3775 	int rc = 0, old_reg_size;
3776 
3777 	/* This is needed to recover number of currently configured mcast macs
3778 	 * in case of failure.
3779 	 */
3780 	old_reg_size = o->get_registry_size(o);
3781 
3782 	/* Do some calculations and checks */
3783 	rc = o->validate(bp, p, cmd);
3784 	if (rc)
3785 		return rc;
3786 
3787 	/* Return if there is no work to do */
3788 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3789 		return 0;
3790 
3791 	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3792 	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3793 
3794 	/* Enqueue the current command to the pending list if we can't complete
3795 	 * it in the current iteration
3796 	 */
3797 	if (r->check_pending(r) ||
3798 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3799 		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3800 		if (rc < 0)
3801 			goto error_exit1;
3802 
3803 		/* As long as the current command is in a command list we
3804 		 * don't need to handle it separately.
3805 		 */
3806 		p->mcast_list_len = 0;
3807 	}
3808 
3809 	if (!r->check_pending(r)) {
3810 
3811 		/* Set 'pending' state */
3812 		r->set_pending(r);
3813 
3814 		/* Configure the new classification in the chip */
3815 		rc = o->config_mcast(bp, p, cmd);
3816 		if (rc < 0)
3817 			goto error_exit2;
3818 
3819 		/* Wait for a ramrod completion if was requested */
3820 		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3821 			rc = o->wait_comp(bp, o);
3822 	}
3823 
3824 	return rc;
3825 
3826 error_exit2:
3827 	r->clear_pending(r);
3828 
3829 error_exit1:
3830 	o->revert(bp, p, old_reg_size);
3831 
3832 	return rc;
3833 }
3834 
3835 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3836 {
3837 	smp_mb__before_clear_bit();
3838 	clear_bit(o->sched_state, o->raw.pstate);
3839 	smp_mb__after_clear_bit();
3840 }
3841 
3842 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3843 {
3844 	smp_mb__before_clear_bit();
3845 	set_bit(o->sched_state, o->raw.pstate);
3846 	smp_mb__after_clear_bit();
3847 }
3848 
3849 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3850 {
3851 	return !!test_bit(o->sched_state, o->raw.pstate);
3852 }
3853 
3854 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3855 {
3856 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3857 }
3858 
3859 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3860 			  struct bnx2x_mcast_obj *mcast_obj,
3861 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3862 			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3863 			  int state, unsigned long *pstate, bnx2x_obj_type type)
3864 {
3865 	memset(mcast_obj, 0, sizeof(*mcast_obj));
3866 
3867 	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3868 			   rdata, rdata_mapping, state, pstate, type);
3869 
3870 	mcast_obj->engine_id = engine_id;
3871 
3872 	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3873 
3874 	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3875 	mcast_obj->check_sched = bnx2x_mcast_check_sched;
3876 	mcast_obj->set_sched = bnx2x_mcast_set_sched;
3877 	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3878 
3879 	if (CHIP_IS_E1(bp)) {
3880 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3881 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3882 		mcast_obj->hdl_restore       =
3883 			bnx2x_mcast_handle_restore_cmd_e1;
3884 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3885 
3886 		if (CHIP_REV_IS_SLOW(bp))
3887 			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3888 		else
3889 			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3890 
3891 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3892 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3893 		mcast_obj->validate          = bnx2x_mcast_validate_e1;
3894 		mcast_obj->revert            = bnx2x_mcast_revert_e1;
3895 		mcast_obj->get_registry_size =
3896 			bnx2x_mcast_get_registry_size_exact;
3897 		mcast_obj->set_registry_size =
3898 			bnx2x_mcast_set_registry_size_exact;
3899 
3900 		/* 57710 is the only chip that uses the exact match for mcast
3901 		 * at the moment.
3902 		 */
3903 		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3904 
3905 	} else if (CHIP_IS_E1H(bp)) {
3906 		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3907 		mcast_obj->enqueue_cmd   = NULL;
3908 		mcast_obj->hdl_restore   = NULL;
3909 		mcast_obj->check_pending = bnx2x_mcast_check_pending;
3910 
3911 		/* 57711 doesn't send a ramrod, so it has unlimited credit
3912 		 * for one command.
3913 		 */
3914 		mcast_obj->max_cmd_len       = -1;
3915 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3916 		mcast_obj->set_one_rule      = NULL;
3917 		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3918 		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3919 		mcast_obj->get_registry_size =
3920 			bnx2x_mcast_get_registry_size_aprox;
3921 		mcast_obj->set_registry_size =
3922 			bnx2x_mcast_set_registry_size_aprox;
3923 	} else {
3924 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3925 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3926 		mcast_obj->hdl_restore       =
3927 			bnx2x_mcast_handle_restore_cmd_e2;
3928 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3929 		/* TODO: There should be a proper HSI define for this number!!!
3930 		 */
3931 		mcast_obj->max_cmd_len       = 16;
3932 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3933 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3934 		mcast_obj->validate          = bnx2x_mcast_validate_e2;
3935 		mcast_obj->revert            = bnx2x_mcast_revert_e2;
3936 		mcast_obj->get_registry_size =
3937 			bnx2x_mcast_get_registry_size_aprox;
3938 		mcast_obj->set_registry_size =
3939 			bnx2x_mcast_set_registry_size_aprox;
3940 	}
3941 }
3942 
3943 /*************************** Credit handling **********************************/
3944 
3945 /**
3946  * atomic_add_ifless - add if the result is less than a given value.
3947  *
3948  * @v:	pointer of type atomic_t
3949  * @a:	the amount to add to v...
3950  * @u:	...if (v + a) is less than u.
3951  *
3952  * returns true if (v + a) was less than u, and false otherwise.
3953  *
3954  */
3955 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3956 {
3957 	int c, old;
3958 
3959 	c = atomic_read(v);
3960 	for (;;) {
3961 		if (unlikely(c + a >= u))
3962 			return false;
3963 
3964 		old = atomic_cmpxchg((v), c, c + a);
3965 		if (likely(old == c))
3966 			break;
3967 		c = old;
3968 	}
3969 
3970 	return true;
3971 }
3972 
3973 /**
3974  * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3975  *
3976  * @v:	pointer of type atomic_t
3977  * @a:	the amount to dec from v...
3978  * @u:	...if (v - a) is more or equal than u.
3979  *
3980  * returns true if (v - a) was more or equal than u, and false
3981  * otherwise.
3982  */
3983 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3984 {
3985 	int c, old;
3986 
3987 	c = atomic_read(v);
3988 	for (;;) {
3989 		if (unlikely(c - a < u))
3990 			return false;
3991 
3992 		old = atomic_cmpxchg((v), c, c - a);
3993 		if (likely(old == c))
3994 			break;
3995 		c = old;
3996 	}
3997 
3998 	return true;
3999 }
4000 
4001 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
4002 {
4003 	bool rc;
4004 
4005 	smp_mb();
4006 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4007 	smp_mb();
4008 
4009 	return rc;
4010 }
4011 
4012 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
4013 {
4014 	bool rc;
4015 
4016 	smp_mb();
4017 
4018 	/* Don't let to refill if credit + cnt > pool_sz */
4019 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4020 
4021 	smp_mb();
4022 
4023 	return rc;
4024 }
4025 
4026 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
4027 {
4028 	int cur_credit;
4029 
4030 	smp_mb();
4031 	cur_credit = atomic_read(&o->credit);
4032 
4033 	return cur_credit;
4034 }
4035 
4036 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
4037 					  int cnt)
4038 {
4039 	return true;
4040 }
4041 
4042 static bool bnx2x_credit_pool_get_entry(
4043 	struct bnx2x_credit_pool_obj *o,
4044 	int *offset)
4045 {
4046 	int idx, vec, i;
4047 
4048 	*offset = -1;
4049 
4050 	/* Find "internal cam-offset" then add to base for this object... */
4051 	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
4052 
4053 		/* Skip the current vector if there are no free entries in it */
4054 		if (!o->pool_mirror[vec])
4055 			continue;
4056 
4057 		/* If we've got here we are going to find a free entry */
4058 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4059 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
4060 
4061 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4062 				/* Got one!! */
4063 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4064 				*offset = o->base_pool_offset + idx;
4065 				return true;
4066 			}
4067 	}
4068 
4069 	return false;
4070 }
4071 
4072 static bool bnx2x_credit_pool_put_entry(
4073 	struct bnx2x_credit_pool_obj *o,
4074 	int offset)
4075 {
4076 	if (offset < o->base_pool_offset)
4077 		return false;
4078 
4079 	offset -= o->base_pool_offset;
4080 
4081 	if (offset >= o->pool_sz)
4082 		return false;
4083 
4084 	/* Return the entry to the pool */
4085 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4086 
4087 	return true;
4088 }
4089 
4090 static bool bnx2x_credit_pool_put_entry_always_true(
4091 	struct bnx2x_credit_pool_obj *o,
4092 	int offset)
4093 {
4094 	return true;
4095 }
4096 
4097 static bool bnx2x_credit_pool_get_entry_always_true(
4098 	struct bnx2x_credit_pool_obj *o,
4099 	int *offset)
4100 {
4101 	*offset = -1;
4102 	return true;
4103 }
4104 /**
4105  * bnx2x_init_credit_pool - initialize credit pool internals.
4106  *
4107  * @p:
4108  * @base:	Base entry in the CAM to use.
4109  * @credit:	pool size.
4110  *
4111  * If base is negative no CAM entries handling will be performed.
4112  * If credit is negative pool operations will always succeed (unlimited pool).
4113  *
4114  */
4115 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
4116 					  int base, int credit)
4117 {
4118 	/* Zero the object first */
4119 	memset(p, 0, sizeof(*p));
4120 
4121 	/* Set the table to all 1s */
4122 	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4123 
4124 	/* Init a pool as full */
4125 	atomic_set(&p->credit, credit);
4126 
4127 	/* The total poll size */
4128 	p->pool_sz = credit;
4129 
4130 	p->base_pool_offset = base;
4131 
4132 	/* Commit the change */
4133 	smp_mb();
4134 
4135 	p->check = bnx2x_credit_pool_check;
4136 
4137 	/* if pool credit is negative - disable the checks */
4138 	if (credit >= 0) {
4139 		p->put      = bnx2x_credit_pool_put;
4140 		p->get      = bnx2x_credit_pool_get;
4141 		p->put_entry = bnx2x_credit_pool_put_entry;
4142 		p->get_entry = bnx2x_credit_pool_get_entry;
4143 	} else {
4144 		p->put      = bnx2x_credit_pool_always_true;
4145 		p->get      = bnx2x_credit_pool_always_true;
4146 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4147 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4148 	}
4149 
4150 	/* If base is negative - disable entries handling */
4151 	if (base < 0) {
4152 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4153 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4154 	}
4155 }
4156 
4157 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4158 				struct bnx2x_credit_pool_obj *p, u8 func_id,
4159 				u8 func_num)
4160 {
4161 /* TODO: this will be defined in consts as well... */
4162 #define BNX2X_CAM_SIZE_EMUL 5
4163 
4164 	int cam_sz;
4165 
4166 	if (CHIP_IS_E1(bp)) {
4167 		/* In E1, Multicast is saved in cam... */
4168 		if (!CHIP_REV_IS_SLOW(bp))
4169 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
4170 		else
4171 			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
4172 
4173 		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4174 
4175 	} else if (CHIP_IS_E1H(bp)) {
4176 		/* CAM credit is equaly divided between all active functions
4177 		 * on the PORT!.
4178 		 */
4179 		if ((func_num > 0)) {
4180 			if (!CHIP_REV_IS_SLOW(bp))
4181 				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4182 			else
4183 				cam_sz = BNX2X_CAM_SIZE_EMUL;
4184 			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4185 		} else {
4186 			/* this should never happen! Block MAC operations. */
4187 			bnx2x_init_credit_pool(p, 0, 0);
4188 		}
4189 
4190 	} else {
4191 
4192 		/* CAM credit is equaly divided between all active functions
4193 		 * on the PATH.
4194 		 */
4195 		if ((func_num > 0)) {
4196 			if (!CHIP_REV_IS_SLOW(bp))
4197 				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4198 			else
4199 				cam_sz = BNX2X_CAM_SIZE_EMUL;
4200 
4201 			/* No need for CAM entries handling for 57712 and
4202 			 * newer.
4203 			 */
4204 			bnx2x_init_credit_pool(p, -1, cam_sz);
4205 		} else {
4206 			/* this should never happen! Block MAC operations. */
4207 			bnx2x_init_credit_pool(p, 0, 0);
4208 		}
4209 	}
4210 }
4211 
4212 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4213 				 struct bnx2x_credit_pool_obj *p,
4214 				 u8 func_id,
4215 				 u8 func_num)
4216 {
4217 	if (CHIP_IS_E1x(bp)) {
4218 		/* There is no VLAN credit in HW on 57710 and 57711 only
4219 		 * MAC / MAC-VLAN can be set
4220 		 */
4221 		bnx2x_init_credit_pool(p, 0, -1);
4222 	} else {
4223 		/* CAM credit is equally divided between all active functions
4224 		 * on the PATH.
4225 		 */
4226 		if (func_num > 0) {
4227 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4228 			bnx2x_init_credit_pool(p, func_id * credit, credit);
4229 		} else
4230 			/* this should never happen! Block VLAN operations. */
4231 			bnx2x_init_credit_pool(p, 0, 0);
4232 	}
4233 }
4234 
4235 /****************** RSS Configuration ******************/
4236 /**
4237  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4238  *
4239  * @bp:		driver handle
4240  * @p:		pointer to rss configuration
4241  *
4242  * Prints it when NETIF_MSG_IFUP debug level is configured.
4243  */
4244 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4245 					struct bnx2x_config_rss_params *p)
4246 {
4247 	int i;
4248 
4249 	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4250 	DP(BNX2X_MSG_SP, "0x0000: ");
4251 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4252 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4253 
4254 		/* Print 4 bytes in a line */
4255 		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4256 		    (((i + 1) & 0x3) == 0)) {
4257 			DP_CONT(BNX2X_MSG_SP, "\n");
4258 			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4259 		}
4260 	}
4261 
4262 	DP_CONT(BNX2X_MSG_SP, "\n");
4263 }
4264 
4265 /**
4266  * bnx2x_setup_rss - configure RSS
4267  *
4268  * @bp:		device handle
4269  * @p:		rss configuration
4270  *
4271  * sends on UPDATE ramrod for that matter.
4272  */
4273 static int bnx2x_setup_rss(struct bnx2x *bp,
4274 			   struct bnx2x_config_rss_params *p)
4275 {
4276 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4277 	struct bnx2x_raw_obj *r = &o->raw;
4278 	struct eth_rss_update_ramrod_data *data =
4279 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4280 	u8 rss_mode = 0;
4281 	int rc;
4282 
4283 	memset(data, 0, sizeof(*data));
4284 
4285 	DP(BNX2X_MSG_SP, "Configuring RSS\n");
4286 
4287 	/* Set an echo field */
4288 	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4289 				 (r->state << BNX2X_SWCID_SHIFT));
4290 
4291 	/* RSS mode */
4292 	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4293 		rss_mode = ETH_RSS_MODE_DISABLED;
4294 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4295 		rss_mode = ETH_RSS_MODE_REGULAR;
4296 
4297 	data->rss_mode = rss_mode;
4298 
4299 	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4300 
4301 	/* RSS capabilities */
4302 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4303 		data->capabilities |=
4304 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4305 
4306 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4307 		data->capabilities |=
4308 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4309 
4310 	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4311 		data->capabilities |=
4312 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4313 
4314 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4315 		data->capabilities |=
4316 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4317 
4318 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4319 		data->capabilities |=
4320 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4321 
4322 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4323 		data->capabilities |=
4324 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4325 
4326 	/* Hashing mask */
4327 	data->rss_result_mask = p->rss_result_mask;
4328 
4329 	/* RSS engine ID */
4330 	data->rss_engine_id = o->engine_id;
4331 
4332 	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4333 
4334 	/* Indirection table */
4335 	memcpy(data->indirection_table, p->ind_table,
4336 		  T_ETH_INDIRECTION_TABLE_SIZE);
4337 
4338 	/* Remember the last configuration */
4339 	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4340 
4341 	/* Print the indirection table */
4342 	if (netif_msg_ifup(bp))
4343 		bnx2x_debug_print_ind_table(bp, p);
4344 
4345 	/* RSS keys */
4346 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4347 		memcpy(&data->rss_key[0], &p->rss_key[0],
4348 		       sizeof(data->rss_key));
4349 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4350 	}
4351 
4352 	/* No need for an explicit memory barrier here as long we would
4353 	 * need to ensure the ordering of writing to the SPQ element
4354 	 * and updating of the SPQ producer which involves a memory
4355 	 * read and we will have to put a full memory barrier there
4356 	 * (inside bnx2x_sp_post()).
4357 	 */
4358 
4359 	/* Send a ramrod */
4360 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4361 			   U64_HI(r->rdata_mapping),
4362 			   U64_LO(r->rdata_mapping),
4363 			   ETH_CONNECTION_TYPE);
4364 
4365 	if (rc < 0)
4366 		return rc;
4367 
4368 	return 1;
4369 }
4370 
4371 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4372 			     u8 *ind_table)
4373 {
4374 	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4375 }
4376 
4377 int bnx2x_config_rss(struct bnx2x *bp,
4378 		     struct bnx2x_config_rss_params *p)
4379 {
4380 	int rc;
4381 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4382 	struct bnx2x_raw_obj *r = &o->raw;
4383 
4384 	/* Do nothing if only driver cleanup was requested */
4385 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4386 		return 0;
4387 
4388 	r->set_pending(r);
4389 
4390 	rc = o->config_rss(bp, p);
4391 	if (rc < 0) {
4392 		r->clear_pending(r);
4393 		return rc;
4394 	}
4395 
4396 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4397 		rc = r->wait_comp(bp, r);
4398 
4399 	return rc;
4400 }
4401 
4402 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4403 			       struct bnx2x_rss_config_obj *rss_obj,
4404 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4405 			       void *rdata, dma_addr_t rdata_mapping,
4406 			       int state, unsigned long *pstate,
4407 			       bnx2x_obj_type type)
4408 {
4409 	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4410 			   rdata_mapping, state, pstate, type);
4411 
4412 	rss_obj->engine_id  = engine_id;
4413 	rss_obj->config_rss = bnx2x_setup_rss;
4414 }
4415 
4416 int validate_vlan_mac(struct bnx2x *bp,
4417 		      struct bnx2x_vlan_mac_obj *vlan_mac)
4418 {
4419 	if (!vlan_mac->get_n_elements) {
4420 		BNX2X_ERR("vlan mac object was not intialized\n");
4421 		return -EINVAL;
4422 	}
4423 	return 0;
4424 }
4425 
4426 /********************** Queue state object ***********************************/
4427 
4428 /**
4429  * bnx2x_queue_state_change - perform Queue state change transition
4430  *
4431  * @bp:		device handle
4432  * @params:	parameters to perform the transition
4433  *
4434  * returns 0 in case of successfully completed transition, negative error
4435  * code in case of failure, positive (EBUSY) value if there is a completion
4436  * to that is still pending (possible only if RAMROD_COMP_WAIT is
4437  * not set in params->ramrod_flags for asynchronous commands).
4438  *
4439  */
4440 int bnx2x_queue_state_change(struct bnx2x *bp,
4441 			     struct bnx2x_queue_state_params *params)
4442 {
4443 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4444 	int rc, pending_bit;
4445 	unsigned long *pending = &o->pending;
4446 
4447 	/* Check that the requested transition is legal */
4448 	rc = o->check_transition(bp, o, params);
4449 	if (rc) {
4450 		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4451 		return -EINVAL;
4452 	}
4453 
4454 	/* Set "pending" bit */
4455 	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4456 	pending_bit = o->set_pending(o, params);
4457 	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4458 
4459 	/* Don't send a command if only driver cleanup was requested */
4460 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4461 		o->complete_cmd(bp, o, pending_bit);
4462 	else {
4463 		/* Send a ramrod */
4464 		rc = o->send_cmd(bp, params);
4465 		if (rc) {
4466 			o->next_state = BNX2X_Q_STATE_MAX;
4467 			clear_bit(pending_bit, pending);
4468 			smp_mb__after_clear_bit();
4469 			return rc;
4470 		}
4471 
4472 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4473 			rc = o->wait_comp(bp, o, pending_bit);
4474 			if (rc)
4475 				return rc;
4476 
4477 			return 0;
4478 		}
4479 	}
4480 
4481 	return !!test_bit(pending_bit, pending);
4482 }
4483 
4484 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4485 				   struct bnx2x_queue_state_params *params)
4486 {
4487 	enum bnx2x_queue_cmd cmd = params->cmd, bit;
4488 
4489 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4490 	 * UPDATE command.
4491 	 */
4492 	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4493 	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
4494 		bit = BNX2X_Q_CMD_UPDATE;
4495 	else
4496 		bit = cmd;
4497 
4498 	set_bit(bit, &obj->pending);
4499 	return bit;
4500 }
4501 
4502 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4503 				 struct bnx2x_queue_sp_obj *o,
4504 				 enum bnx2x_queue_cmd cmd)
4505 {
4506 	return bnx2x_state_wait(bp, cmd, &o->pending);
4507 }
4508 
4509 /**
4510  * bnx2x_queue_comp_cmd - complete the state change command.
4511  *
4512  * @bp:		device handle
4513  * @o:
4514  * @cmd:
4515  *
4516  * Checks that the arrived completion is expected.
4517  */
4518 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4519 				struct bnx2x_queue_sp_obj *o,
4520 				enum bnx2x_queue_cmd cmd)
4521 {
4522 	unsigned long cur_pending = o->pending;
4523 
4524 	if (!test_and_clear_bit(cmd, &cur_pending)) {
4525 		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4526 			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4527 			  o->state, cur_pending, o->next_state);
4528 		return -EINVAL;
4529 	}
4530 
4531 	if (o->next_tx_only >= o->max_cos)
4532 		/* >= because tx only must always be smaller than cos since the
4533 		 * primary connection supports COS 0
4534 		 */
4535 		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4536 			   o->next_tx_only, o->max_cos);
4537 
4538 	DP(BNX2X_MSG_SP,
4539 	   "Completing command %d for queue %d, setting state to %d\n",
4540 	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4541 
4542 	if (o->next_tx_only)  /* print num tx-only if any exist */
4543 		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4544 		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4545 
4546 	o->state = o->next_state;
4547 	o->num_tx_only = o->next_tx_only;
4548 	o->next_state = BNX2X_Q_STATE_MAX;
4549 
4550 	/* It's important that o->state and o->next_state are
4551 	 * updated before o->pending.
4552 	 */
4553 	wmb();
4554 
4555 	clear_bit(cmd, &o->pending);
4556 	smp_mb__after_clear_bit();
4557 
4558 	return 0;
4559 }
4560 
4561 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4562 				struct bnx2x_queue_state_params *cmd_params,
4563 				struct client_init_ramrod_data *data)
4564 {
4565 	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4566 
4567 	/* Rx data */
4568 
4569 	/* IPv6 TPA supported for E2 and above only */
4570 	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4571 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4572 }
4573 
4574 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4575 				struct bnx2x_queue_sp_obj *o,
4576 				struct bnx2x_general_setup_params *params,
4577 				struct client_init_general_data *gen_data,
4578 				unsigned long *flags)
4579 {
4580 	gen_data->client_id = o->cl_id;
4581 
4582 	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4583 		gen_data->statistics_counter_id =
4584 					params->stat_id;
4585 		gen_data->statistics_en_flg = 1;
4586 		gen_data->statistics_zero_flg =
4587 			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4588 	} else
4589 		gen_data->statistics_counter_id =
4590 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4591 
4592 	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4593 	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4594 	gen_data->sp_client_id = params->spcl_id;
4595 	gen_data->mtu = cpu_to_le16(params->mtu);
4596 	gen_data->func_id = o->func_id;
4597 
4598 	gen_data->cos = params->cos;
4599 
4600 	gen_data->traffic_type =
4601 		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4602 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4603 
4604 	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4605 	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4606 }
4607 
4608 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4609 				struct bnx2x_txq_setup_params *params,
4610 				struct client_init_tx_data *tx_data,
4611 				unsigned long *flags)
4612 {
4613 	tx_data->enforce_security_flg =
4614 		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4615 	tx_data->default_vlan =
4616 		cpu_to_le16(params->default_vlan);
4617 	tx_data->default_vlan_flg =
4618 		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4619 	tx_data->tx_switching_flg =
4620 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4621 	tx_data->anti_spoofing_flg =
4622 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4623 	tx_data->force_default_pri_flg =
4624 		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4625 
4626 	tx_data->tunnel_lso_inc_ip_id =
4627 		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4628 	tx_data->tunnel_non_lso_pcsum_location =
4629 		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4630 								  PCSUM_ON_BD;
4631 
4632 	tx_data->tx_status_block_id = params->fw_sb_id;
4633 	tx_data->tx_sb_index_number = params->sb_cq_index;
4634 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4635 
4636 	tx_data->tx_bd_page_base.lo =
4637 		cpu_to_le32(U64_LO(params->dscr_map));
4638 	tx_data->tx_bd_page_base.hi =
4639 		cpu_to_le32(U64_HI(params->dscr_map));
4640 
4641 	/* Don't configure any Tx switching mode during queue SETUP */
4642 	tx_data->state = 0;
4643 }
4644 
4645 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4646 				struct rxq_pause_params *params,
4647 				struct client_init_rx_data *rx_data)
4648 {
4649 	/* flow control data */
4650 	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4651 	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4652 	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4653 	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4654 	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4655 	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4656 	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4657 }
4658 
4659 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4660 				struct bnx2x_rxq_setup_params *params,
4661 				struct client_init_rx_data *rx_data,
4662 				unsigned long *flags)
4663 {
4664 	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4665 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4666 	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4667 				CLIENT_INIT_RX_DATA_TPA_MODE;
4668 	rx_data->vmqueue_mode_en_flg = 0;
4669 
4670 	rx_data->cache_line_alignment_log_size =
4671 		params->cache_line_log;
4672 	rx_data->enable_dynamic_hc =
4673 		test_bit(BNX2X_Q_FLG_DHC, flags);
4674 	rx_data->max_sges_for_packet = params->max_sges_pkt;
4675 	rx_data->client_qzone_id = params->cl_qzone_id;
4676 	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4677 
4678 	/* Always start in DROP_ALL mode */
4679 	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4680 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4681 
4682 	/* We don't set drop flags */
4683 	rx_data->drop_ip_cs_err_flg = 0;
4684 	rx_data->drop_tcp_cs_err_flg = 0;
4685 	rx_data->drop_ttl0_flg = 0;
4686 	rx_data->drop_udp_cs_err_flg = 0;
4687 	rx_data->inner_vlan_removal_enable_flg =
4688 		test_bit(BNX2X_Q_FLG_VLAN, flags);
4689 	rx_data->outer_vlan_removal_enable_flg =
4690 		test_bit(BNX2X_Q_FLG_OV, flags);
4691 	rx_data->status_block_id = params->fw_sb_id;
4692 	rx_data->rx_sb_index_number = params->sb_cq_index;
4693 	rx_data->max_tpa_queues = params->max_tpa_queues;
4694 	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4695 	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4696 	rx_data->bd_page_base.lo =
4697 		cpu_to_le32(U64_LO(params->dscr_map));
4698 	rx_data->bd_page_base.hi =
4699 		cpu_to_le32(U64_HI(params->dscr_map));
4700 	rx_data->sge_page_base.lo =
4701 		cpu_to_le32(U64_LO(params->sge_map));
4702 	rx_data->sge_page_base.hi =
4703 		cpu_to_le32(U64_HI(params->sge_map));
4704 	rx_data->cqe_page_base.lo =
4705 		cpu_to_le32(U64_LO(params->rcq_map));
4706 	rx_data->cqe_page_base.hi =
4707 		cpu_to_le32(U64_HI(params->rcq_map));
4708 	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4709 
4710 	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4711 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4712 		rx_data->is_approx_mcast = 1;
4713 	}
4714 
4715 	rx_data->rss_engine_id = params->rss_engine_id;
4716 
4717 	/* silent vlan removal */
4718 	rx_data->silent_vlan_removal_flg =
4719 		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4720 	rx_data->silent_vlan_value =
4721 		cpu_to_le16(params->silent_removal_value);
4722 	rx_data->silent_vlan_mask =
4723 		cpu_to_le16(params->silent_removal_mask);
4724 }
4725 
4726 /* initialize the general, tx and rx parts of a queue object */
4727 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4728 				struct bnx2x_queue_state_params *cmd_params,
4729 				struct client_init_ramrod_data *data)
4730 {
4731 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4732 				       &cmd_params->params.setup.gen_params,
4733 				       &data->general,
4734 				       &cmd_params->params.setup.flags);
4735 
4736 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4737 				  &cmd_params->params.setup.txq_params,
4738 				  &data->tx,
4739 				  &cmd_params->params.setup.flags);
4740 
4741 	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4742 				  &cmd_params->params.setup.rxq_params,
4743 				  &data->rx,
4744 				  &cmd_params->params.setup.flags);
4745 
4746 	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4747 				     &cmd_params->params.setup.pause_params,
4748 				     &data->rx);
4749 }
4750 
4751 /* initialize the general and tx parts of a tx-only queue object */
4752 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4753 				struct bnx2x_queue_state_params *cmd_params,
4754 				struct tx_queue_init_ramrod_data *data)
4755 {
4756 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4757 				       &cmd_params->params.tx_only.gen_params,
4758 				       &data->general,
4759 				       &cmd_params->params.tx_only.flags);
4760 
4761 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4762 				  &cmd_params->params.tx_only.txq_params,
4763 				  &data->tx,
4764 				  &cmd_params->params.tx_only.flags);
4765 
4766 	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4767 			 cmd_params->q_obj->cids[0],
4768 			 data->tx.tx_bd_page_base.lo,
4769 			 data->tx.tx_bd_page_base.hi);
4770 }
4771 
4772 /**
4773  * bnx2x_q_init - init HW/FW queue
4774  *
4775  * @bp:		device handle
4776  * @params:
4777  *
4778  * HW/FW initial Queue configuration:
4779  *      - HC: Rx and Tx
4780  *      - CDU context validation
4781  *
4782  */
4783 static inline int bnx2x_q_init(struct bnx2x *bp,
4784 			       struct bnx2x_queue_state_params *params)
4785 {
4786 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4787 	struct bnx2x_queue_init_params *init = &params->params.init;
4788 	u16 hc_usec;
4789 	u8 cos;
4790 
4791 	/* Tx HC configuration */
4792 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4793 	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4794 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4795 
4796 		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4797 			init->tx.sb_cq_index,
4798 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4799 			hc_usec);
4800 	}
4801 
4802 	/* Rx HC configuration */
4803 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4804 	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4805 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4806 
4807 		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4808 			init->rx.sb_cq_index,
4809 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4810 			hc_usec);
4811 	}
4812 
4813 	/* Set CDU context validation values */
4814 	for (cos = 0; cos < o->max_cos; cos++) {
4815 		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4816 				 o->cids[cos], cos);
4817 		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4818 		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4819 	}
4820 
4821 	/* As no ramrod is sent, complete the command immediately  */
4822 	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4823 
4824 	mmiowb();
4825 	smp_mb();
4826 
4827 	return 0;
4828 }
4829 
4830 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4831 					struct bnx2x_queue_state_params *params)
4832 {
4833 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4834 	struct client_init_ramrod_data *rdata =
4835 		(struct client_init_ramrod_data *)o->rdata;
4836 	dma_addr_t data_mapping = o->rdata_mapping;
4837 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4838 
4839 	/* Clear the ramrod data */
4840 	memset(rdata, 0, sizeof(*rdata));
4841 
4842 	/* Fill the ramrod data */
4843 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4844 
4845 	/* No need for an explicit memory barrier here as long we would
4846 	 * need to ensure the ordering of writing to the SPQ element
4847 	 * and updating of the SPQ producer which involves a memory
4848 	 * read and we will have to put a full memory barrier there
4849 	 * (inside bnx2x_sp_post()).
4850 	 */
4851 
4852 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4853 			     U64_HI(data_mapping),
4854 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4855 }
4856 
4857 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4858 					struct bnx2x_queue_state_params *params)
4859 {
4860 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4861 	struct client_init_ramrod_data *rdata =
4862 		(struct client_init_ramrod_data *)o->rdata;
4863 	dma_addr_t data_mapping = o->rdata_mapping;
4864 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4865 
4866 	/* Clear the ramrod data */
4867 	memset(rdata, 0, sizeof(*rdata));
4868 
4869 	/* Fill the ramrod data */
4870 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4871 	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4872 
4873 	/* No need for an explicit memory barrier here as long we would
4874 	 * need to ensure the ordering of writing to the SPQ element
4875 	 * and updating of the SPQ producer which involves a memory
4876 	 * read and we will have to put a full memory barrier there
4877 	 * (inside bnx2x_sp_post()).
4878 	 */
4879 
4880 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4881 			     U64_HI(data_mapping),
4882 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4883 }
4884 
4885 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4886 				  struct bnx2x_queue_state_params *params)
4887 {
4888 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4889 	struct tx_queue_init_ramrod_data *rdata =
4890 		(struct tx_queue_init_ramrod_data *)o->rdata;
4891 	dma_addr_t data_mapping = o->rdata_mapping;
4892 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4893 	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4894 		&params->params.tx_only;
4895 	u8 cid_index = tx_only_params->cid_index;
4896 
4897 	if (cid_index >= o->max_cos) {
4898 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4899 			  o->cl_id, cid_index);
4900 		return -EINVAL;
4901 	}
4902 
4903 	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4904 			 tx_only_params->gen_params.cos,
4905 			 tx_only_params->gen_params.spcl_id);
4906 
4907 	/* Clear the ramrod data */
4908 	memset(rdata, 0, sizeof(*rdata));
4909 
4910 	/* Fill the ramrod data */
4911 	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4912 
4913 	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4914 			 o->cids[cid_index], rdata->general.client_id,
4915 			 rdata->general.sp_client_id, rdata->general.cos);
4916 
4917 	/* No need for an explicit memory barrier here as long we would
4918 	 * need to ensure the ordering of writing to the SPQ element
4919 	 * and updating of the SPQ producer which involves a memory
4920 	 * read and we will have to put a full memory barrier there
4921 	 * (inside bnx2x_sp_post()).
4922 	 */
4923 
4924 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4925 			     U64_HI(data_mapping),
4926 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4927 }
4928 
4929 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4930 				     struct bnx2x_queue_sp_obj *obj,
4931 				     struct bnx2x_queue_update_params *params,
4932 				     struct client_update_ramrod_data *data)
4933 {
4934 	/* Client ID of the client to update */
4935 	data->client_id = obj->cl_id;
4936 
4937 	/* Function ID of the client to update */
4938 	data->func_id = obj->func_id;
4939 
4940 	/* Default VLAN value */
4941 	data->default_vlan = cpu_to_le16(params->def_vlan);
4942 
4943 	/* Inner VLAN stripping */
4944 	data->inner_vlan_removal_enable_flg =
4945 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4946 	data->inner_vlan_removal_change_flg =
4947 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4948 			 &params->update_flags);
4949 
4950 	/* Outer VLAN stripping */
4951 	data->outer_vlan_removal_enable_flg =
4952 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4953 	data->outer_vlan_removal_change_flg =
4954 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4955 			 &params->update_flags);
4956 
4957 	/* Drop packets that have source MAC that doesn't belong to this
4958 	 * Queue.
4959 	 */
4960 	data->anti_spoofing_enable_flg =
4961 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4962 	data->anti_spoofing_change_flg =
4963 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4964 
4965 	/* Activate/Deactivate */
4966 	data->activate_flg =
4967 		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4968 	data->activate_change_flg =
4969 		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4970 
4971 	/* Enable default VLAN */
4972 	data->default_vlan_enable_flg =
4973 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4974 	data->default_vlan_change_flg =
4975 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4976 			 &params->update_flags);
4977 
4978 	/* silent vlan removal */
4979 	data->silent_vlan_change_flg =
4980 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4981 			 &params->update_flags);
4982 	data->silent_vlan_removal_flg =
4983 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4984 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4985 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4986 }
4987 
4988 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4989 				      struct bnx2x_queue_state_params *params)
4990 {
4991 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4992 	struct client_update_ramrod_data *rdata =
4993 		(struct client_update_ramrod_data *)o->rdata;
4994 	dma_addr_t data_mapping = o->rdata_mapping;
4995 	struct bnx2x_queue_update_params *update_params =
4996 		&params->params.update;
4997 	u8 cid_index = update_params->cid_index;
4998 
4999 	if (cid_index >= o->max_cos) {
5000 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5001 			  o->cl_id, cid_index);
5002 		return -EINVAL;
5003 	}
5004 
5005 	/* Clear the ramrod data */
5006 	memset(rdata, 0, sizeof(*rdata));
5007 
5008 	/* Fill the ramrod data */
5009 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
5010 
5011 	/* No need for an explicit memory barrier here as long we would
5012 	 * need to ensure the ordering of writing to the SPQ element
5013 	 * and updating of the SPQ producer which involves a memory
5014 	 * read and we will have to put a full memory barrier there
5015 	 * (inside bnx2x_sp_post()).
5016 	 */
5017 
5018 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5019 			     o->cids[cid_index], U64_HI(data_mapping),
5020 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5021 }
5022 
5023 /**
5024  * bnx2x_q_send_deactivate - send DEACTIVATE command
5025  *
5026  * @bp:		device handle
5027  * @params:
5028  *
5029  * implemented using the UPDATE command.
5030  */
5031 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
5032 					struct bnx2x_queue_state_params *params)
5033 {
5034 	struct bnx2x_queue_update_params *update = &params->params.update;
5035 
5036 	memset(update, 0, sizeof(*update));
5037 
5038 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5039 
5040 	return bnx2x_q_send_update(bp, params);
5041 }
5042 
5043 /**
5044  * bnx2x_q_send_activate - send ACTIVATE command
5045  *
5046  * @bp:		device handle
5047  * @params:
5048  *
5049  * implemented using the UPDATE command.
5050  */
5051 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
5052 					struct bnx2x_queue_state_params *params)
5053 {
5054 	struct bnx2x_queue_update_params *update = &params->params.update;
5055 
5056 	memset(update, 0, sizeof(*update));
5057 
5058 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
5059 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5060 
5061 	return bnx2x_q_send_update(bp, params);
5062 }
5063 
5064 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
5065 					struct bnx2x_queue_state_params *params)
5066 {
5067 	/* TODO: Not implemented yet. */
5068 	return -1;
5069 }
5070 
5071 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
5072 				    struct bnx2x_queue_state_params *params)
5073 {
5074 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5075 
5076 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
5077 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
5078 			     ETH_CONNECTION_TYPE);
5079 }
5080 
5081 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
5082 				       struct bnx2x_queue_state_params *params)
5083 {
5084 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5085 	u8 cid_idx = params->params.cfc_del.cid_index;
5086 
5087 	if (cid_idx >= o->max_cos) {
5088 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5089 			  o->cl_id, cid_idx);
5090 		return -EINVAL;
5091 	}
5092 
5093 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
5094 			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
5095 }
5096 
5097 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
5098 					struct bnx2x_queue_state_params *params)
5099 {
5100 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5101 	u8 cid_index = params->params.terminate.cid_index;
5102 
5103 	if (cid_index >= o->max_cos) {
5104 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5105 			  o->cl_id, cid_index);
5106 		return -EINVAL;
5107 	}
5108 
5109 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
5110 			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
5111 }
5112 
5113 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
5114 				     struct bnx2x_queue_state_params *params)
5115 {
5116 	struct bnx2x_queue_sp_obj *o = params->q_obj;
5117 
5118 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
5119 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
5120 			     ETH_CONNECTION_TYPE);
5121 }
5122 
5123 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
5124 					struct bnx2x_queue_state_params *params)
5125 {
5126 	switch (params->cmd) {
5127 	case BNX2X_Q_CMD_INIT:
5128 		return bnx2x_q_init(bp, params);
5129 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5130 		return bnx2x_q_send_setup_tx_only(bp, params);
5131 	case BNX2X_Q_CMD_DEACTIVATE:
5132 		return bnx2x_q_send_deactivate(bp, params);
5133 	case BNX2X_Q_CMD_ACTIVATE:
5134 		return bnx2x_q_send_activate(bp, params);
5135 	case BNX2X_Q_CMD_UPDATE:
5136 		return bnx2x_q_send_update(bp, params);
5137 	case BNX2X_Q_CMD_UPDATE_TPA:
5138 		return bnx2x_q_send_update_tpa(bp, params);
5139 	case BNX2X_Q_CMD_HALT:
5140 		return bnx2x_q_send_halt(bp, params);
5141 	case BNX2X_Q_CMD_CFC_DEL:
5142 		return bnx2x_q_send_cfc_del(bp, params);
5143 	case BNX2X_Q_CMD_TERMINATE:
5144 		return bnx2x_q_send_terminate(bp, params);
5145 	case BNX2X_Q_CMD_EMPTY:
5146 		return bnx2x_q_send_empty(bp, params);
5147 	default:
5148 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5149 		return -EINVAL;
5150 	}
5151 }
5152 
5153 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
5154 				    struct bnx2x_queue_state_params *params)
5155 {
5156 	switch (params->cmd) {
5157 	case BNX2X_Q_CMD_SETUP:
5158 		return bnx2x_q_send_setup_e1x(bp, params);
5159 	case BNX2X_Q_CMD_INIT:
5160 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5161 	case BNX2X_Q_CMD_DEACTIVATE:
5162 	case BNX2X_Q_CMD_ACTIVATE:
5163 	case BNX2X_Q_CMD_UPDATE:
5164 	case BNX2X_Q_CMD_UPDATE_TPA:
5165 	case BNX2X_Q_CMD_HALT:
5166 	case BNX2X_Q_CMD_CFC_DEL:
5167 	case BNX2X_Q_CMD_TERMINATE:
5168 	case BNX2X_Q_CMD_EMPTY:
5169 		return bnx2x_queue_send_cmd_cmn(bp, params);
5170 	default:
5171 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5172 		return -EINVAL;
5173 	}
5174 }
5175 
5176 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
5177 				   struct bnx2x_queue_state_params *params)
5178 {
5179 	switch (params->cmd) {
5180 	case BNX2X_Q_CMD_SETUP:
5181 		return bnx2x_q_send_setup_e2(bp, params);
5182 	case BNX2X_Q_CMD_INIT:
5183 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5184 	case BNX2X_Q_CMD_DEACTIVATE:
5185 	case BNX2X_Q_CMD_ACTIVATE:
5186 	case BNX2X_Q_CMD_UPDATE:
5187 	case BNX2X_Q_CMD_UPDATE_TPA:
5188 	case BNX2X_Q_CMD_HALT:
5189 	case BNX2X_Q_CMD_CFC_DEL:
5190 	case BNX2X_Q_CMD_TERMINATE:
5191 	case BNX2X_Q_CMD_EMPTY:
5192 		return bnx2x_queue_send_cmd_cmn(bp, params);
5193 	default:
5194 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5195 		return -EINVAL;
5196 	}
5197 }
5198 
5199 /**
5200  * bnx2x_queue_chk_transition - check state machine of a regular Queue
5201  *
5202  * @bp:		device handle
5203  * @o:
5204  * @params:
5205  *
5206  * (not Forwarding)
5207  * It both checks if the requested command is legal in a current
5208  * state and, if it's legal, sets a `next_state' in the object
5209  * that will be used in the completion flow to set the `state'
5210  * of the object.
5211  *
5212  * returns 0 if a requested command is a legal transition,
5213  *         -EINVAL otherwise.
5214  */
5215 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5216 				      struct bnx2x_queue_sp_obj *o,
5217 				      struct bnx2x_queue_state_params *params)
5218 {
5219 	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5220 	enum bnx2x_queue_cmd cmd = params->cmd;
5221 	struct bnx2x_queue_update_params *update_params =
5222 		 &params->params.update;
5223 	u8 next_tx_only = o->num_tx_only;
5224 
5225 	/* Forget all pending for completion commands if a driver only state
5226 	 * transition has been requested.
5227 	 */
5228 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5229 		o->pending = 0;
5230 		o->next_state = BNX2X_Q_STATE_MAX;
5231 	}
5232 
5233 	/* Don't allow a next state transition if we are in the middle of
5234 	 * the previous one.
5235 	 */
5236 	if (o->pending) {
5237 		BNX2X_ERR("Blocking transition since pending was %lx\n",
5238 			  o->pending);
5239 		return -EBUSY;
5240 	}
5241 
5242 	switch (state) {
5243 	case BNX2X_Q_STATE_RESET:
5244 		if (cmd == BNX2X_Q_CMD_INIT)
5245 			next_state = BNX2X_Q_STATE_INITIALIZED;
5246 
5247 		break;
5248 	case BNX2X_Q_STATE_INITIALIZED:
5249 		if (cmd == BNX2X_Q_CMD_SETUP) {
5250 			if (test_bit(BNX2X_Q_FLG_ACTIVE,
5251 				     &params->params.setup.flags))
5252 				next_state = BNX2X_Q_STATE_ACTIVE;
5253 			else
5254 				next_state = BNX2X_Q_STATE_INACTIVE;
5255 		}
5256 
5257 		break;
5258 	case BNX2X_Q_STATE_ACTIVE:
5259 		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5260 			next_state = BNX2X_Q_STATE_INACTIVE;
5261 
5262 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5263 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5264 			next_state = BNX2X_Q_STATE_ACTIVE;
5265 
5266 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5267 			next_state = BNX2X_Q_STATE_MULTI_COS;
5268 			next_tx_only = 1;
5269 		}
5270 
5271 		else if (cmd == BNX2X_Q_CMD_HALT)
5272 			next_state = BNX2X_Q_STATE_STOPPED;
5273 
5274 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5275 			/* If "active" state change is requested, update the
5276 			 *  state accordingly.
5277 			 */
5278 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5279 				     &update_params->update_flags) &&
5280 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5281 				      &update_params->update_flags))
5282 				next_state = BNX2X_Q_STATE_INACTIVE;
5283 			else
5284 				next_state = BNX2X_Q_STATE_ACTIVE;
5285 		}
5286 
5287 		break;
5288 	case BNX2X_Q_STATE_MULTI_COS:
5289 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5290 			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5291 
5292 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5293 			next_state = BNX2X_Q_STATE_MULTI_COS;
5294 			next_tx_only = o->num_tx_only + 1;
5295 		}
5296 
5297 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5298 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5299 			next_state = BNX2X_Q_STATE_MULTI_COS;
5300 
5301 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5302 			/* If "active" state change is requested, update the
5303 			 *  state accordingly.
5304 			 */
5305 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5306 				     &update_params->update_flags) &&
5307 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5308 				      &update_params->update_flags))
5309 				next_state = BNX2X_Q_STATE_INACTIVE;
5310 			else
5311 				next_state = BNX2X_Q_STATE_MULTI_COS;
5312 		}
5313 
5314 		break;
5315 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5316 		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5317 			next_tx_only = o->num_tx_only - 1;
5318 			if (next_tx_only == 0)
5319 				next_state = BNX2X_Q_STATE_ACTIVE;
5320 			else
5321 				next_state = BNX2X_Q_STATE_MULTI_COS;
5322 		}
5323 
5324 		break;
5325 	case BNX2X_Q_STATE_INACTIVE:
5326 		if (cmd == BNX2X_Q_CMD_ACTIVATE)
5327 			next_state = BNX2X_Q_STATE_ACTIVE;
5328 
5329 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5330 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5331 			next_state = BNX2X_Q_STATE_INACTIVE;
5332 
5333 		else if (cmd == BNX2X_Q_CMD_HALT)
5334 			next_state = BNX2X_Q_STATE_STOPPED;
5335 
5336 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5337 			/* If "active" state change is requested, update the
5338 			 * state accordingly.
5339 			 */
5340 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5341 				     &update_params->update_flags) &&
5342 			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5343 				     &update_params->update_flags)){
5344 				if (o->num_tx_only == 0)
5345 					next_state = BNX2X_Q_STATE_ACTIVE;
5346 				else /* tx only queues exist for this queue */
5347 					next_state = BNX2X_Q_STATE_MULTI_COS;
5348 			} else
5349 				next_state = BNX2X_Q_STATE_INACTIVE;
5350 		}
5351 
5352 		break;
5353 	case BNX2X_Q_STATE_STOPPED:
5354 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5355 			next_state = BNX2X_Q_STATE_TERMINATED;
5356 
5357 		break;
5358 	case BNX2X_Q_STATE_TERMINATED:
5359 		if (cmd == BNX2X_Q_CMD_CFC_DEL)
5360 			next_state = BNX2X_Q_STATE_RESET;
5361 
5362 		break;
5363 	default:
5364 		BNX2X_ERR("Illegal state: %d\n", state);
5365 	}
5366 
5367 	/* Transition is assured */
5368 	if (next_state != BNX2X_Q_STATE_MAX) {
5369 		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5370 				 state, cmd, next_state);
5371 		o->next_state = next_state;
5372 		o->next_tx_only = next_tx_only;
5373 		return 0;
5374 	}
5375 
5376 	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5377 
5378 	return -EINVAL;
5379 }
5380 
5381 void bnx2x_init_queue_obj(struct bnx2x *bp,
5382 			  struct bnx2x_queue_sp_obj *obj,
5383 			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5384 			  void *rdata,
5385 			  dma_addr_t rdata_mapping, unsigned long type)
5386 {
5387 	memset(obj, 0, sizeof(*obj));
5388 
5389 	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5390 	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5391 
5392 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5393 	obj->max_cos = cid_cnt;
5394 	obj->cl_id = cl_id;
5395 	obj->func_id = func_id;
5396 	obj->rdata = rdata;
5397 	obj->rdata_mapping = rdata_mapping;
5398 	obj->type = type;
5399 	obj->next_state = BNX2X_Q_STATE_MAX;
5400 
5401 	if (CHIP_IS_E1x(bp))
5402 		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5403 	else
5404 		obj->send_cmd = bnx2x_queue_send_cmd_e2;
5405 
5406 	obj->check_transition = bnx2x_queue_chk_transition;
5407 
5408 	obj->complete_cmd = bnx2x_queue_comp_cmd;
5409 	obj->wait_comp = bnx2x_queue_wait_comp;
5410 	obj->set_pending = bnx2x_queue_set_pending;
5411 }
5412 
5413 /* return a queue object's logical state*/
5414 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5415 			       struct bnx2x_queue_sp_obj *obj)
5416 {
5417 	switch (obj->state) {
5418 	case BNX2X_Q_STATE_ACTIVE:
5419 	case BNX2X_Q_STATE_MULTI_COS:
5420 		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5421 	case BNX2X_Q_STATE_RESET:
5422 	case BNX2X_Q_STATE_INITIALIZED:
5423 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5424 	case BNX2X_Q_STATE_INACTIVE:
5425 	case BNX2X_Q_STATE_STOPPED:
5426 	case BNX2X_Q_STATE_TERMINATED:
5427 	case BNX2X_Q_STATE_FLRED:
5428 		return BNX2X_Q_LOGICAL_STATE_STOPPED;
5429 	default:
5430 		return -EINVAL;
5431 	}
5432 }
5433 
5434 /********************** Function state object *********************************/
5435 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5436 					   struct bnx2x_func_sp_obj *o)
5437 {
5438 	/* in the middle of transaction - return INVALID state */
5439 	if (o->pending)
5440 		return BNX2X_F_STATE_MAX;
5441 
5442 	/* unsure the order of reading of o->pending and o->state
5443 	 * o->pending should be read first
5444 	 */
5445 	rmb();
5446 
5447 	return o->state;
5448 }
5449 
5450 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5451 				struct bnx2x_func_sp_obj *o,
5452 				enum bnx2x_func_cmd cmd)
5453 {
5454 	return bnx2x_state_wait(bp, cmd, &o->pending);
5455 }
5456 
5457 /**
5458  * bnx2x_func_state_change_comp - complete the state machine transition
5459  *
5460  * @bp:		device handle
5461  * @o:
5462  * @cmd:
5463  *
5464  * Called on state change transition. Completes the state
5465  * machine transition only - no HW interaction.
5466  */
5467 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5468 					       struct bnx2x_func_sp_obj *o,
5469 					       enum bnx2x_func_cmd cmd)
5470 {
5471 	unsigned long cur_pending = o->pending;
5472 
5473 	if (!test_and_clear_bit(cmd, &cur_pending)) {
5474 		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5475 			  cmd, BP_FUNC(bp), o->state,
5476 			  cur_pending, o->next_state);
5477 		return -EINVAL;
5478 	}
5479 
5480 	DP(BNX2X_MSG_SP,
5481 	   "Completing command %d for func %d, setting state to %d\n",
5482 	   cmd, BP_FUNC(bp), o->next_state);
5483 
5484 	o->state = o->next_state;
5485 	o->next_state = BNX2X_F_STATE_MAX;
5486 
5487 	/* It's important that o->state and o->next_state are
5488 	 * updated before o->pending.
5489 	 */
5490 	wmb();
5491 
5492 	clear_bit(cmd, &o->pending);
5493 	smp_mb__after_clear_bit();
5494 
5495 	return 0;
5496 }
5497 
5498 /**
5499  * bnx2x_func_comp_cmd - complete the state change command
5500  *
5501  * @bp:		device handle
5502  * @o:
5503  * @cmd:
5504  *
5505  * Checks that the arrived completion is expected.
5506  */
5507 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5508 			       struct bnx2x_func_sp_obj *o,
5509 			       enum bnx2x_func_cmd cmd)
5510 {
5511 	/* Complete the state machine part first, check if it's a
5512 	 * legal completion.
5513 	 */
5514 	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5515 	return rc;
5516 }
5517 
5518 /**
5519  * bnx2x_func_chk_transition - perform function state machine transition
5520  *
5521  * @bp:		device handle
5522  * @o:
5523  * @params:
5524  *
5525  * It both checks if the requested command is legal in a current
5526  * state and, if it's legal, sets a `next_state' in the object
5527  * that will be used in the completion flow to set the `state'
5528  * of the object.
5529  *
5530  * returns 0 if a requested command is a legal transition,
5531  *         -EINVAL otherwise.
5532  */
5533 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5534 				     struct bnx2x_func_sp_obj *o,
5535 				     struct bnx2x_func_state_params *params)
5536 {
5537 	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5538 	enum bnx2x_func_cmd cmd = params->cmd;
5539 
5540 	/* Forget all pending for completion commands if a driver only state
5541 	 * transition has been requested.
5542 	 */
5543 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5544 		o->pending = 0;
5545 		o->next_state = BNX2X_F_STATE_MAX;
5546 	}
5547 
5548 	/* Don't allow a next state transition if we are in the middle of
5549 	 * the previous one.
5550 	 */
5551 	if (o->pending)
5552 		return -EBUSY;
5553 
5554 	switch (state) {
5555 	case BNX2X_F_STATE_RESET:
5556 		if (cmd == BNX2X_F_CMD_HW_INIT)
5557 			next_state = BNX2X_F_STATE_INITIALIZED;
5558 
5559 		break;
5560 	case BNX2X_F_STATE_INITIALIZED:
5561 		if (cmd == BNX2X_F_CMD_START)
5562 			next_state = BNX2X_F_STATE_STARTED;
5563 
5564 		else if (cmd == BNX2X_F_CMD_HW_RESET)
5565 			next_state = BNX2X_F_STATE_RESET;
5566 
5567 		break;
5568 	case BNX2X_F_STATE_STARTED:
5569 		if (cmd == BNX2X_F_CMD_STOP)
5570 			next_state = BNX2X_F_STATE_INITIALIZED;
5571 		/* afex ramrods can be sent only in started mode, and only
5572 		 * if not pending for function_stop ramrod completion
5573 		 * for these events - next state remained STARTED.
5574 		 */
5575 		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5576 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5577 			next_state = BNX2X_F_STATE_STARTED;
5578 
5579 		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5580 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5581 			next_state = BNX2X_F_STATE_STARTED;
5582 
5583 		/* Switch_update ramrod can be sent in either started or
5584 		 * tx_stopped state, and it doesn't change the state.
5585 		 */
5586 		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5587 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5588 			next_state = BNX2X_F_STATE_STARTED;
5589 
5590 		else if (cmd == BNX2X_F_CMD_TX_STOP)
5591 			next_state = BNX2X_F_STATE_TX_STOPPED;
5592 
5593 		break;
5594 	case BNX2X_F_STATE_TX_STOPPED:
5595 		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5596 		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5597 			next_state = BNX2X_F_STATE_TX_STOPPED;
5598 
5599 		else if (cmd == BNX2X_F_CMD_TX_START)
5600 			next_state = BNX2X_F_STATE_STARTED;
5601 
5602 		break;
5603 	default:
5604 		BNX2X_ERR("Unknown state: %d\n", state);
5605 	}
5606 
5607 	/* Transition is assured */
5608 	if (next_state != BNX2X_F_STATE_MAX) {
5609 		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5610 				 state, cmd, next_state);
5611 		o->next_state = next_state;
5612 		return 0;
5613 	}
5614 
5615 	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5616 			 state, cmd);
5617 
5618 	return -EINVAL;
5619 }
5620 
5621 /**
5622  * bnx2x_func_init_func - performs HW init at function stage
5623  *
5624  * @bp:		device handle
5625  * @drv:
5626  *
5627  * Init HW when the current phase is
5628  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5629  * HW blocks.
5630  */
5631 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5632 				       const struct bnx2x_func_sp_drv_ops *drv)
5633 {
5634 	return drv->init_hw_func(bp);
5635 }
5636 
5637 /**
5638  * bnx2x_func_init_port - performs HW init at port stage
5639  *
5640  * @bp:		device handle
5641  * @drv:
5642  *
5643  * Init HW when the current phase is
5644  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5645  * FUNCTION-only HW blocks.
5646  *
5647  */
5648 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5649 				       const struct bnx2x_func_sp_drv_ops *drv)
5650 {
5651 	int rc = drv->init_hw_port(bp);
5652 	if (rc)
5653 		return rc;
5654 
5655 	return bnx2x_func_init_func(bp, drv);
5656 }
5657 
5658 /**
5659  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5660  *
5661  * @bp:		device handle
5662  * @drv:
5663  *
5664  * Init HW when the current phase is
5665  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5666  * PORT-only and FUNCTION-only HW blocks.
5667  */
5668 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5669 					const struct bnx2x_func_sp_drv_ops *drv)
5670 {
5671 	int rc = drv->init_hw_cmn_chip(bp);
5672 	if (rc)
5673 		return rc;
5674 
5675 	return bnx2x_func_init_port(bp, drv);
5676 }
5677 
5678 /**
5679  * bnx2x_func_init_cmn - performs HW init at common stage
5680  *
5681  * @bp:		device handle
5682  * @drv:
5683  *
5684  * Init HW when the current phase is
5685  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5686  * PORT-only and FUNCTION-only HW blocks.
5687  */
5688 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5689 				      const struct bnx2x_func_sp_drv_ops *drv)
5690 {
5691 	int rc = drv->init_hw_cmn(bp);
5692 	if (rc)
5693 		return rc;
5694 
5695 	return bnx2x_func_init_port(bp, drv);
5696 }
5697 
5698 static int bnx2x_func_hw_init(struct bnx2x *bp,
5699 			      struct bnx2x_func_state_params *params)
5700 {
5701 	u32 load_code = params->params.hw_init.load_phase;
5702 	struct bnx2x_func_sp_obj *o = params->f_obj;
5703 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5704 	int rc = 0;
5705 
5706 	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5707 			 BP_ABS_FUNC(bp), load_code);
5708 
5709 	/* Prepare buffers for unzipping the FW */
5710 	rc = drv->gunzip_init(bp);
5711 	if (rc)
5712 		return rc;
5713 
5714 	/* Prepare FW */
5715 	rc = drv->init_fw(bp);
5716 	if (rc) {
5717 		BNX2X_ERR("Error loading firmware\n");
5718 		goto init_err;
5719 	}
5720 
5721 	/* Handle the beginning of COMMON_XXX pases separately... */
5722 	switch (load_code) {
5723 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5724 		rc = bnx2x_func_init_cmn_chip(bp, drv);
5725 		if (rc)
5726 			goto init_err;
5727 
5728 		break;
5729 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5730 		rc = bnx2x_func_init_cmn(bp, drv);
5731 		if (rc)
5732 			goto init_err;
5733 
5734 		break;
5735 	case FW_MSG_CODE_DRV_LOAD_PORT:
5736 		rc = bnx2x_func_init_port(bp, drv);
5737 		if (rc)
5738 			goto init_err;
5739 
5740 		break;
5741 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5742 		rc = bnx2x_func_init_func(bp, drv);
5743 		if (rc)
5744 			goto init_err;
5745 
5746 		break;
5747 	default:
5748 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5749 		rc = -EINVAL;
5750 	}
5751 
5752 init_err:
5753 	drv->gunzip_end(bp);
5754 
5755 	/* In case of success, complete the command immediately: no ramrods
5756 	 * have been sent.
5757 	 */
5758 	if (!rc)
5759 		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5760 
5761 	return rc;
5762 }
5763 
5764 /**
5765  * bnx2x_func_reset_func - reset HW at function stage
5766  *
5767  * @bp:		device handle
5768  * @drv:
5769  *
5770  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5771  * FUNCTION-only HW blocks.
5772  */
5773 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5774 					const struct bnx2x_func_sp_drv_ops *drv)
5775 {
5776 	drv->reset_hw_func(bp);
5777 }
5778 
5779 /**
5780  * bnx2x_func_reset_port - reset HW at port stage
5781  *
5782  * @bp:		device handle
5783  * @drv:
5784  *
5785  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5786  * FUNCTION-only and PORT-only HW blocks.
5787  *
5788  *                 !!!IMPORTANT!!!
5789  *
5790  * It's important to call reset_port before reset_func() as the last thing
5791  * reset_func does is pf_disable() thus disabling PGLUE_B, which
5792  * makes impossible any DMAE transactions.
5793  */
5794 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5795 					const struct bnx2x_func_sp_drv_ops *drv)
5796 {
5797 	drv->reset_hw_port(bp);
5798 	bnx2x_func_reset_func(bp, drv);
5799 }
5800 
5801 /**
5802  * bnx2x_func_reset_cmn - reset HW at common stage
5803  *
5804  * @bp:		device handle
5805  * @drv:
5806  *
5807  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5808  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5809  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5810  */
5811 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5812 					const struct bnx2x_func_sp_drv_ops *drv)
5813 {
5814 	bnx2x_func_reset_port(bp, drv);
5815 	drv->reset_hw_cmn(bp);
5816 }
5817 
5818 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5819 				      struct bnx2x_func_state_params *params)
5820 {
5821 	u32 reset_phase = params->params.hw_reset.reset_phase;
5822 	struct bnx2x_func_sp_obj *o = params->f_obj;
5823 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5824 
5825 	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5826 			 reset_phase);
5827 
5828 	switch (reset_phase) {
5829 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5830 		bnx2x_func_reset_cmn(bp, drv);
5831 		break;
5832 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5833 		bnx2x_func_reset_port(bp, drv);
5834 		break;
5835 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5836 		bnx2x_func_reset_func(bp, drv);
5837 		break;
5838 	default:
5839 		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5840 			   reset_phase);
5841 		break;
5842 	}
5843 
5844 	/* Complete the command immediately: no ramrods have been sent. */
5845 	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5846 
5847 	return 0;
5848 }
5849 
5850 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5851 					struct bnx2x_func_state_params *params)
5852 {
5853 	struct bnx2x_func_sp_obj *o = params->f_obj;
5854 	struct function_start_data *rdata =
5855 		(struct function_start_data *)o->rdata;
5856 	dma_addr_t data_mapping = o->rdata_mapping;
5857 	struct bnx2x_func_start_params *start_params = &params->params.start;
5858 
5859 	memset(rdata, 0, sizeof(*rdata));
5860 
5861 	/* Fill the ramrod data with provided parameters */
5862 	rdata->function_mode	= (u8)start_params->mf_mode;
5863 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
5864 	rdata->path_id		= BP_PATH(bp);
5865 	rdata->network_cos_mode	= start_params->network_cos_mode;
5866 	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
5867 	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;
5868 
5869 	/* No need for an explicit memory barrier here as long we would
5870 	 * need to ensure the ordering of writing to the SPQ element
5871 	 * and updating of the SPQ producer which involves a memory
5872 	 * read and we will have to put a full memory barrier there
5873 	 * (inside bnx2x_sp_post()).
5874 	 */
5875 
5876 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5877 			     U64_HI(data_mapping),
5878 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5879 }
5880 
5881 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5882 					struct bnx2x_func_state_params *params)
5883 {
5884 	struct bnx2x_func_sp_obj *o = params->f_obj;
5885 	struct function_update_data *rdata =
5886 		(struct function_update_data *)o->rdata;
5887 	dma_addr_t data_mapping = o->rdata_mapping;
5888 	struct bnx2x_func_switch_update_params *switch_update_params =
5889 		&params->params.switch_update;
5890 
5891 	memset(rdata, 0, sizeof(*rdata));
5892 
5893 	/* Fill the ramrod data with provided parameters */
5894 	rdata->tx_switch_suspend_change_flg = 1;
5895 	rdata->tx_switch_suspend = switch_update_params->suspend;
5896 	rdata->echo = SWITCH_UPDATE;
5897 
5898 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5899 			     U64_HI(data_mapping),
5900 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5901 }
5902 
5903 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5904 					 struct bnx2x_func_state_params *params)
5905 {
5906 	struct bnx2x_func_sp_obj *o = params->f_obj;
5907 	struct function_update_data *rdata =
5908 		(struct function_update_data *)o->afex_rdata;
5909 	dma_addr_t data_mapping = o->afex_rdata_mapping;
5910 	struct bnx2x_func_afex_update_params *afex_update_params =
5911 		&params->params.afex_update;
5912 
5913 	memset(rdata, 0, sizeof(*rdata));
5914 
5915 	/* Fill the ramrod data with provided parameters */
5916 	rdata->vif_id_change_flg = 1;
5917 	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5918 	rdata->afex_default_vlan_change_flg = 1;
5919 	rdata->afex_default_vlan =
5920 		cpu_to_le16(afex_update_params->afex_default_vlan);
5921 	rdata->allowed_priorities_change_flg = 1;
5922 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
5923 	rdata->echo = AFEX_UPDATE;
5924 
5925 	/*  No need for an explicit memory barrier here as long we would
5926 	 *  need to ensure the ordering of writing to the SPQ element
5927 	 *  and updating of the SPQ producer which involves a memory
5928 	 *  read and we will have to put a full memory barrier there
5929 	 *  (inside bnx2x_sp_post()).
5930 	 */
5931 	DP(BNX2X_MSG_SP,
5932 	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5933 	   rdata->vif_id,
5934 	   rdata->afex_default_vlan, rdata->allowed_priorities);
5935 
5936 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5937 			     U64_HI(data_mapping),
5938 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5939 }
5940 
5941 static
5942 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5943 					 struct bnx2x_func_state_params *params)
5944 {
5945 	struct bnx2x_func_sp_obj *o = params->f_obj;
5946 	struct afex_vif_list_ramrod_data *rdata =
5947 		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
5948 	struct bnx2x_func_afex_viflists_params *afex_vif_params =
5949 		&params->params.afex_viflists;
5950 	u64 *p_rdata = (u64 *)rdata;
5951 
5952 	memset(rdata, 0, sizeof(*rdata));
5953 
5954 	/* Fill the ramrod data with provided parameters */
5955 	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5956 	rdata->func_bit_map          = afex_vif_params->func_bit_map;
5957 	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5958 	rdata->func_to_clear         = afex_vif_params->func_to_clear;
5959 
5960 	/* send in echo type of sub command */
5961 	rdata->echo = afex_vif_params->afex_vif_list_command;
5962 
5963 	/*  No need for an explicit memory barrier here as long we would
5964 	 *  need to ensure the ordering of writing to the SPQ element
5965 	 *  and updating of the SPQ producer which involves a memory
5966 	 *  read and we will have to put a full memory barrier there
5967 	 *  (inside bnx2x_sp_post()).
5968 	 */
5969 
5970 	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5971 	   rdata->afex_vif_list_command, rdata->vif_list_index,
5972 	   rdata->func_bit_map, rdata->func_to_clear);
5973 
5974 	/* this ramrod sends data directly and not through DMA mapping */
5975 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5976 			     U64_HI(*p_rdata), U64_LO(*p_rdata),
5977 			     NONE_CONNECTION_TYPE);
5978 }
5979 
5980 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5981 				       struct bnx2x_func_state_params *params)
5982 {
5983 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5984 			     NONE_CONNECTION_TYPE);
5985 }
5986 
5987 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5988 				       struct bnx2x_func_state_params *params)
5989 {
5990 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5991 			     NONE_CONNECTION_TYPE);
5992 }
5993 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5994 				       struct bnx2x_func_state_params *params)
5995 {
5996 	struct bnx2x_func_sp_obj *o = params->f_obj;
5997 	struct flow_control_configuration *rdata =
5998 		(struct flow_control_configuration *)o->rdata;
5999 	dma_addr_t data_mapping = o->rdata_mapping;
6000 	struct bnx2x_func_tx_start_params *tx_start_params =
6001 		&params->params.tx_start;
6002 	int i;
6003 
6004 	memset(rdata, 0, sizeof(*rdata));
6005 
6006 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6007 	rdata->dcb_version = tx_start_params->dcb_version;
6008 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
6009 
6010 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6011 		rdata->traffic_type_to_priority_cos[i] =
6012 			tx_start_params->traffic_type_to_priority_cos[i];
6013 
6014 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6015 			     U64_HI(data_mapping),
6016 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6017 }
6018 
6019 static int bnx2x_func_send_cmd(struct bnx2x *bp,
6020 			       struct bnx2x_func_state_params *params)
6021 {
6022 	switch (params->cmd) {
6023 	case BNX2X_F_CMD_HW_INIT:
6024 		return bnx2x_func_hw_init(bp, params);
6025 	case BNX2X_F_CMD_START:
6026 		return bnx2x_func_send_start(bp, params);
6027 	case BNX2X_F_CMD_STOP:
6028 		return bnx2x_func_send_stop(bp, params);
6029 	case BNX2X_F_CMD_HW_RESET:
6030 		return bnx2x_func_hw_reset(bp, params);
6031 	case BNX2X_F_CMD_AFEX_UPDATE:
6032 		return bnx2x_func_send_afex_update(bp, params);
6033 	case BNX2X_F_CMD_AFEX_VIFLISTS:
6034 		return bnx2x_func_send_afex_viflists(bp, params);
6035 	case BNX2X_F_CMD_TX_STOP:
6036 		return bnx2x_func_send_tx_stop(bp, params);
6037 	case BNX2X_F_CMD_TX_START:
6038 		return bnx2x_func_send_tx_start(bp, params);
6039 	case BNX2X_F_CMD_SWITCH_UPDATE:
6040 		return bnx2x_func_send_switch_update(bp, params);
6041 	default:
6042 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
6043 		return -EINVAL;
6044 	}
6045 }
6046 
6047 void bnx2x_init_func_obj(struct bnx2x *bp,
6048 			 struct bnx2x_func_sp_obj *obj,
6049 			 void *rdata, dma_addr_t rdata_mapping,
6050 			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
6051 			 struct bnx2x_func_sp_drv_ops *drv_iface)
6052 {
6053 	memset(obj, 0, sizeof(*obj));
6054 
6055 	mutex_init(&obj->one_pending_mutex);
6056 
6057 	obj->rdata = rdata;
6058 	obj->rdata_mapping = rdata_mapping;
6059 	obj->afex_rdata = afex_rdata;
6060 	obj->afex_rdata_mapping = afex_rdata_mapping;
6061 	obj->send_cmd = bnx2x_func_send_cmd;
6062 	obj->check_transition = bnx2x_func_chk_transition;
6063 	obj->complete_cmd = bnx2x_func_comp_cmd;
6064 	obj->wait_comp = bnx2x_func_wait_comp;
6065 
6066 	obj->drv = drv_iface;
6067 }
6068 
6069 /**
6070  * bnx2x_func_state_change - perform Function state change transition
6071  *
6072  * @bp:		device handle
6073  * @params:	parameters to perform the transaction
6074  *
6075  * returns 0 in case of successfully completed transition,
6076  *         negative error code in case of failure, positive
6077  *         (EBUSY) value if there is a completion to that is
6078  *         still pending (possible only if RAMROD_COMP_WAIT is
6079  *         not set in params->ramrod_flags for asynchronous
6080  *         commands).
6081  */
6082 int bnx2x_func_state_change(struct bnx2x *bp,
6083 			    struct bnx2x_func_state_params *params)
6084 {
6085 	struct bnx2x_func_sp_obj *o = params->f_obj;
6086 	int rc, cnt = 300;
6087 	enum bnx2x_func_cmd cmd = params->cmd;
6088 	unsigned long *pending = &o->pending;
6089 
6090 	mutex_lock(&o->one_pending_mutex);
6091 
6092 	/* Check that the requested transition is legal */
6093 	rc = o->check_transition(bp, o, params);
6094 	if ((rc == -EBUSY) &&
6095 	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
6096 		while ((rc == -EBUSY) && (--cnt > 0)) {
6097 			mutex_unlock(&o->one_pending_mutex);
6098 			msleep(10);
6099 			mutex_lock(&o->one_pending_mutex);
6100 			rc = o->check_transition(bp, o, params);
6101 		}
6102 		if (rc == -EBUSY) {
6103 			mutex_unlock(&o->one_pending_mutex);
6104 			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
6105 			return rc;
6106 		}
6107 	} else if (rc) {
6108 		mutex_unlock(&o->one_pending_mutex);
6109 		return rc;
6110 	}
6111 
6112 	/* Set "pending" bit */
6113 	set_bit(cmd, pending);
6114 
6115 	/* Don't send a command if only driver cleanup was requested */
6116 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6117 		bnx2x_func_state_change_comp(bp, o, cmd);
6118 		mutex_unlock(&o->one_pending_mutex);
6119 	} else {
6120 		/* Send a ramrod */
6121 		rc = o->send_cmd(bp, params);
6122 
6123 		mutex_unlock(&o->one_pending_mutex);
6124 
6125 		if (rc) {
6126 			o->next_state = BNX2X_F_STATE_MAX;
6127 			clear_bit(cmd, pending);
6128 			smp_mb__after_clear_bit();
6129 			return rc;
6130 		}
6131 
6132 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6133 			rc = o->wait_comp(bp, o, cmd);
6134 			if (rc)
6135 				return rc;
6136 
6137 			return 0;
6138 		}
6139 	}
6140 
6141 	return !!test_bit(cmd, pending);
6142 }
6143