1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30 
31 #define BNX2X_MAX_EMUL_MULTI		16
32 
33 /**** Exe Queue interfaces ****/
34 
35 /**
36  * bnx2x_exe_queue_init - init the Exe Queue object
37  *
38  * @o:		poiter to the object
39  * @exe_len:	length
40  * @owner:	poiter to the owner
41  * @validate:	validate function pointer
42  * @optimize:	optimize function pointer
43  * @exec:	execute function pointer
44  * @get:	get function pointer
45  */
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47 					struct bnx2x_exe_queue_obj *o,
48 					int exe_len,
49 					union bnx2x_qable_obj *owner,
50 					exe_q_validate validate,
51 					exe_q_remove remove,
52 					exe_q_optimize optimize,
53 					exe_q_execute exec,
54 					exe_q_get get)
55 {
56 	memset(o, 0, sizeof(*o));
57 
58 	INIT_LIST_HEAD(&o->exe_queue);
59 	INIT_LIST_HEAD(&o->pending_comp);
60 
61 	spin_lock_init(&o->lock);
62 
63 	o->exe_chunk_len = exe_len;
64 	o->owner         = owner;
65 
66 	/* Owner specific callbacks */
67 	o->validate      = validate;
68 	o->remove        = remove;
69 	o->optimize      = optimize;
70 	o->execute       = exec;
71 	o->get           = get;
72 
73 	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
74 	   exe_len);
75 }
76 
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 					     struct bnx2x_exeq_elem *elem)
79 {
80 	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
81 	kfree(elem);
82 }
83 
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
85 {
86 	struct bnx2x_exeq_elem *elem;
87 	int cnt = 0;
88 
89 	spin_lock_bh(&o->lock);
90 
91 	list_for_each_entry(elem, &o->exe_queue, link)
92 		cnt++;
93 
94 	spin_unlock_bh(&o->lock);
95 
96 	return cnt;
97 }
98 
99 /**
100  * bnx2x_exe_queue_add - add a new element to the execution queue
101  *
102  * @bp:		driver handle
103  * @o:		queue
 * @elem:	new element to add
105  * @restore:	true - do not optimize the command
106  *
107  * If the element is optimized or is illegal, frees it.
108  */
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 				      struct bnx2x_exe_queue_obj *o,
111 				      struct bnx2x_exeq_elem *elem,
112 				      bool restore)
113 {
114 	int rc;
115 
116 	spin_lock_bh(&o->lock);
117 
118 	if (!restore) {
		/* Try to cancel this element against an opposite pending command */
120 		rc = o->optimize(bp, o->owner, elem);
121 		if (rc)
122 			goto free_and_exit;
123 
124 		/* Check if this request is ok */
125 		rc = o->validate(bp, o->owner, elem);
126 		if (rc) {
127 			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
128 			goto free_and_exit;
129 		}
130 	}
131 
	/* The element is legal - add it to the execution queue */
133 	list_add_tail(&elem->link, &o->exe_queue);
134 
135 	spin_unlock_bh(&o->lock);
136 
137 	return 0;
138 
139 free_and_exit:
140 	bnx2x_exe_queue_free_elem(bp, elem);
141 
142 	spin_unlock_bh(&o->lock);
143 
	return rc;
}
147 
148 static inline void __bnx2x_exe_queue_reset_pending(
149 	struct bnx2x *bp,
150 	struct bnx2x_exe_queue_obj *o)
151 {
152 	struct bnx2x_exeq_elem *elem;
153 
154 	while (!list_empty(&o->pending_comp)) {
155 		elem = list_first_entry(&o->pending_comp,
156 					struct bnx2x_exeq_elem, link);
157 
158 		list_del(&elem->link);
159 		bnx2x_exe_queue_free_elem(bp, elem);
160 	}
161 }
162 
163 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
164 						 struct bnx2x_exe_queue_obj *o)
165 {
166 
167 	spin_lock_bh(&o->lock);
168 
169 	__bnx2x_exe_queue_reset_pending(bp, o);
170 
171 	spin_unlock_bh(&o->lock);
172 
173 }
174 
175 /**
176  * bnx2x_exe_queue_step - execute one execution chunk atomically
177  *
178  * @bp:			driver handle
179  * @o:			queue
180  * @ramrod_flags:	flags
181  *
 * (Atomicity is ensured using the exe_queue->lock).
183  */
184 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
185 				       struct bnx2x_exe_queue_obj *o,
186 				       unsigned long *ramrod_flags)
187 {
188 	struct bnx2x_exeq_elem *elem, spacer;
189 	int cur_len = 0, rc;
190 
191 	memset(&spacer, 0, sizeof(spacer));
192 
193 	spin_lock_bh(&o->lock);
194 
195 	/*
196 	 * Next step should not be performed until the current is finished,
197 	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
198 	 * properly clear object internals without sending any command to the FW
199 	 * which also implies there won't be any completion to clear the
200 	 * 'pending' list.
201 	 */
202 	if (!list_empty(&o->pending_comp)) {
203 		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
204 			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
205 			__bnx2x_exe_queue_reset_pending(bp, o);
206 		} else {
207 			spin_unlock_bh(&o->lock);
208 			return 1;
209 		}
210 	}
211 
212 	/*
213 	 * Run through the pending commands list and create a next
214 	 * execution chunk.
215 	 */
216 	while (!list_empty(&o->exe_queue)) {
217 		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
218 					link);
219 		WARN_ON(!elem->cmd_len);
220 
221 		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
222 			cur_len += elem->cmd_len;
223 			/*
224 			 * Prevent from both lists being empty when moving an
225 			 * element. This will allow the call of
226 			 * bnx2x_exe_queue_empty() without locking.
227 			 */
228 			list_add_tail(&spacer.link, &o->pending_comp);
229 			mb();
230 			list_move_tail(&elem->link, &o->pending_comp);
231 			list_del(&spacer.link);
232 		} else
233 			break;
234 	}
235 
236 	/* Sanity check */
237 	if (!cur_len) {
238 		spin_unlock_bh(&o->lock);
239 		return 0;
240 	}
241 
242 	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
243 	if (rc < 0)
244 		/*
245 		 *  In case of an error return the commands back to the queue
246 		 *  and reset the pending_comp.
247 		 */
248 		list_splice_init(&o->pending_comp, &o->exe_queue);
249 	else if (!rc)
250 		/*
251 		 * If zero is returned, means there are no outstanding pending
252 		 * completions and we may dismiss the pending list.
253 		 */
254 		__bnx2x_exe_queue_reset_pending(bp, o);
255 
256 	spin_unlock_bh(&o->lock);
257 	return rc;
258 }
259 
260 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
261 {
262 	bool empty = list_empty(&o->exe_queue);
263 
	/* Don't reorder!!! This barrier pairs with the mb() in
	 * bnx2x_exe_queue_step(): an element is added to pending_comp
	 * before it is moved off exe_queue, so the two lists are never
	 * both seen empty while an element is in flight.
	 */
	mb();
266 
267 	return empty && list_empty(&o->pending_comp);
268 }
269 
270 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
271 	struct bnx2x *bp)
272 {
273 	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
274 	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
275 }
276 
277 /************************ raw_obj functions ***********************************/
278 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
279 {
280 	return !!test_bit(o->state, o->pstate);
281 }
282 
283 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
284 {
285 	smp_mb__before_clear_bit();
286 	clear_bit(o->state, o->pstate);
287 	smp_mb__after_clear_bit();
288 }
289 
290 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
291 {
292 	smp_mb__before_clear_bit();
293 	set_bit(o->state, o->pstate);
294 	smp_mb__after_clear_bit();
295 }
296 
297 /**
298  * bnx2x_state_wait - wait until the given bit(state) is cleared
299  *
300  * @bp:		device handle
301  * @state:	state which is to be cleared
 * @pstate:	pointer to the state buffer
303  *
304  */
305 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
306 				   unsigned long *pstate)
307 {
308 	/* can take a while if any port is running */
	int cnt = 5000;

312 	if (CHIP_REV_IS_EMUL(bp))
313 		cnt *= 20;
314 
315 	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
316 
317 	might_sleep();
318 	while (cnt--) {
319 		if (!test_bit(state, pstate)) {
320 #ifdef BNX2X_STOP_ON_ERROR
321 			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
322 #endif
323 			return 0;
324 		}
325 
326 		usleep_range(1000, 2000);
327 
328 		if (bp->panic)
329 			return -EIO;
330 	}
331 
332 	/* timeout! */
333 	BNX2X_ERR("timeout waiting for state %d\n", state);
334 #ifdef BNX2X_STOP_ON_ERROR
335 	bnx2x_panic();
336 #endif
337 
338 	return -EBUSY;
339 }
340 
341 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
342 {
343 	return bnx2x_state_wait(bp, raw->state, raw->pstate);
344 }
345 
346 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
347 /* credit handling callbacks */
348 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
349 {
350 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
351 
352 	WARN_ON(!mp);
353 
354 	return mp->get_entry(mp, offset);
355 }
356 
357 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
358 {
359 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
360 
361 	WARN_ON(!mp);
362 
363 	return mp->get(mp, 1);
364 }
365 
366 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
367 {
368 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
369 
370 	WARN_ON(!vp);
371 
372 	return vp->get_entry(vp, offset);
373 }
374 
375 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
376 {
377 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
378 
379 	WARN_ON(!vp);
380 
381 	return vp->get(vp, 1);
382 }
383 
384 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
385 {
386 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
387 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
388 
389 	if (!mp->get(mp, 1))
390 		return false;
391 
392 	if (!vp->get(vp, 1)) {
393 		mp->put(mp, 1);
394 		return false;
395 	}
396 
397 	return true;
398 }
399 
400 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
401 {
402 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
403 
404 	return mp->put_entry(mp, offset);
405 }
406 
407 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
408 {
409 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
410 
411 	return mp->put(mp, 1);
412 }
413 
414 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
415 {
416 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
417 
418 	return vp->put_entry(vp, offset);
419 }
420 
421 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
422 {
423 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
424 
425 	return vp->put(vp, 1);
426 }
427 
428 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
429 {
430 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
431 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
432 
433 	if (!mp->put(mp, 1))
434 		return false;
435 
436 	if (!vp->put(vp, 1)) {
437 		mp->get(mp, 1);
438 		return false;
439 	}
440 
441 	return true;
442 }
443 
444 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
445 				int n, u8 *base, u8 stride, u8 size)
446 {
447 	struct bnx2x_vlan_mac_registry_elem *pos;
448 	u8 *next = base;
449 	int counter = 0;
450 
451 	/* traverse list */
452 	list_for_each_entry(pos, &o->head, link) {
453 		if (counter < n) {
454 			memcpy(next, &pos->u, size);
455 			counter++;
456 			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
457 			   counter, next);
458 			next += stride + size;
459 
460 		}
461 	}
462 	return counter * ETH_ALEN;
463 }
464 
465 /* check_add() callbacks */
466 static int bnx2x_check_mac_add(struct bnx2x *bp,
467 			       struct bnx2x_vlan_mac_obj *o,
468 			       union bnx2x_classification_ramrod_data *data)
469 {
470 	struct bnx2x_vlan_mac_registry_elem *pos;
471 
472 	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
473 
474 	if (!is_valid_ether_addr(data->mac.mac))
475 		return -EINVAL;
476 
477 	/* Check if a requested MAC already exists */
478 	list_for_each_entry(pos, &o->head, link)
479 		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
480 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
481 			return -EEXIST;
482 
483 	return 0;
484 }
485 
486 static int bnx2x_check_vlan_add(struct bnx2x *bp,
487 				struct bnx2x_vlan_mac_obj *o,
488 				union bnx2x_classification_ramrod_data *data)
489 {
490 	struct bnx2x_vlan_mac_registry_elem *pos;
491 
492 	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
493 
494 	list_for_each_entry(pos, &o->head, link)
495 		if (data->vlan.vlan == pos->u.vlan.vlan)
496 			return -EEXIST;
497 
498 	return 0;
499 }
500 
501 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
502 				    struct bnx2x_vlan_mac_obj *o,
503 				   union bnx2x_classification_ramrod_data *data)
504 {
505 	struct bnx2x_vlan_mac_registry_elem *pos;
506 
507 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
508 	   data->vlan_mac.mac, data->vlan_mac.vlan);
509 
510 	list_for_each_entry(pos, &o->head, link)
511 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
512 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
513 				  ETH_ALEN)) &&
514 		    (data->vlan_mac.is_inner_mac ==
515 		     pos->u.vlan_mac.is_inner_mac))
516 			return -EEXIST;
517 
518 	return 0;
519 }
520 
521 
522 /* check_del() callbacks */
523 static struct bnx2x_vlan_mac_registry_elem *
524 	bnx2x_check_mac_del(struct bnx2x *bp,
525 			    struct bnx2x_vlan_mac_obj *o,
526 			    union bnx2x_classification_ramrod_data *data)
527 {
528 	struct bnx2x_vlan_mac_registry_elem *pos;
529 
530 	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
531 
532 	list_for_each_entry(pos, &o->head, link)
533 		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
534 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
535 			return pos;
536 
537 	return NULL;
538 }
539 
540 static struct bnx2x_vlan_mac_registry_elem *
541 	bnx2x_check_vlan_del(struct bnx2x *bp,
542 			     struct bnx2x_vlan_mac_obj *o,
543 			     union bnx2x_classification_ramrod_data *data)
544 {
545 	struct bnx2x_vlan_mac_registry_elem *pos;
546 
547 	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
548 
549 	list_for_each_entry(pos, &o->head, link)
550 		if (data->vlan.vlan == pos->u.vlan.vlan)
551 			return pos;
552 
553 	return NULL;
554 }
555 
556 static struct bnx2x_vlan_mac_registry_elem *
557 	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
558 				 struct bnx2x_vlan_mac_obj *o,
559 				 union bnx2x_classification_ramrod_data *data)
560 {
561 	struct bnx2x_vlan_mac_registry_elem *pos;
562 
563 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
564 	   data->vlan_mac.mac, data->vlan_mac.vlan);
565 
566 	list_for_each_entry(pos, &o->head, link)
567 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
568 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
569 			     ETH_ALEN)) &&
570 		    (data->vlan_mac.is_inner_mac ==
571 		     pos->u.vlan_mac.is_inner_mac))
572 			return pos;
573 
574 	return NULL;
575 }
576 
577 /* check_move() callback */
578 static bool bnx2x_check_move(struct bnx2x *bp,
579 			     struct bnx2x_vlan_mac_obj *src_o,
580 			     struct bnx2x_vlan_mac_obj *dst_o,
581 			     union bnx2x_classification_ramrod_data *data)
582 {
583 	struct bnx2x_vlan_mac_registry_elem *pos;
584 	int rc;
585 
586 	/* Check if we can delete the requested configuration from the first
587 	 * object.
588 	 */
589 	pos = src_o->check_del(bp, src_o, data);
590 
	/* Check if the configuration can be added to the destination object */
592 	rc = dst_o->check_add(bp, dst_o, data);
593 
	/* If this classification cannot be added (it is already set)
	 * or cannot be deleted - return an error.
	 */
597 	if (rc || !pos)
598 		return false;
599 
600 	return true;
601 }
602 
603 static bool bnx2x_check_move_always_err(
604 	struct bnx2x *bp,
605 	struct bnx2x_vlan_mac_obj *src_o,
606 	struct bnx2x_vlan_mac_obj *dst_o,
607 	union bnx2x_classification_ramrod_data *data)
608 {
609 	return false;
610 }
611 
612 
613 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
614 {
615 	struct bnx2x_raw_obj *raw = &o->raw;
616 	u8 rx_tx_flag = 0;
617 
618 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
619 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
620 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
621 
622 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
623 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
624 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
625 
626 	return rx_tx_flag;
627 }
628 
629 
630 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
631 			  bool add, unsigned char *dev_addr, int index)
632 {
633 	u32 wb_data[2];
634 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
635 			 NIG_REG_LLH0_FUNC_MEM;
636 
637 	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
638 		return;
639 
640 	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
641 		return;
642 
643 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
644 			 (add ? "ADD" : "DELETE"), index);
645 
646 	if (add) {
647 		/* LLH_FUNC_MEM is a u64 WB register */
648 		reg_offset += 8*index;
649 
650 		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
651 			      (dev_addr[4] <<  8) |  dev_addr[5]);
652 		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
653 
654 		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
655 	}
656 
657 	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
658 				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
659 }
660 
661 /**
662  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
663  *
664  * @bp:		device handle
665  * @o:		queue for which we want to configure this rule
666  * @add:	if true the command is an ADD command, DEL otherwise
667  * @opcode:	CLASSIFY_RULE_OPCODE_XXX
668  * @hdr:	pointer to a header to setup
669  *
670  */
671 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
672 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
673 	struct eth_classify_cmd_header *hdr)
674 {
675 	struct bnx2x_raw_obj *raw = &o->raw;
676 
677 	hdr->client_id = raw->cl_id;
678 	hdr->func_id = raw->func_id;
679 
	/* Rx and/or Tx (internal switching) configuration? */
681 	hdr->cmd_general_data |=
682 		bnx2x_vlan_mac_get_rx_tx_flag(o);
683 
684 	if (add)
685 		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
686 
687 	hdr->cmd_general_data |=
688 		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
689 }
690 
691 /**
692  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
693  *
694  * @cid:	connection id
695  * @type:	BNX2X_FILTER_XXX_PENDING
696  * @hdr:	poiter to header to setup
697  * @rule_cnt:
698  *
699  * currently we always configure one rule and echo field to contain a CID and an
700  * opcode type.
701  */
702 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
703 				struct eth_classify_header *hdr, int rule_cnt)
704 {
705 	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
706 				(type << BNX2X_SWCID_SHIFT));
707 	hdr->rule_cnt = (u8)rule_cnt;
708 }
709 
710 
711 /* hw_config() callbacks */
712 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
713 				 struct bnx2x_vlan_mac_obj *o,
714 				 struct bnx2x_exeq_elem *elem, int rule_idx,
715 				 int cam_offset)
716 {
717 	struct bnx2x_raw_obj *raw = &o->raw;
718 	struct eth_classify_rules_ramrod_data *data =
719 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
720 	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
721 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
723 	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
724 	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
725 
726 	/*
727 	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
728 	 * relevant. In addition, current implementation is tuned for a
729 	 * single ETH MAC.
730 	 *
731 	 * When multiple unicast ETH MACs PF configuration in switch
732 	 * independent mode is required (NetQ, multiple netdev MACs,
733 	 * etc.), consider better utilisation of 8 per function MAC
734 	 * entries in the LLH register. There is also
735 	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
736 	 * total number of CAM entries to 16.
737 	 *
738 	 * Currently we won't configure NIG for MACs other than a primary ETH
739 	 * MAC and iSCSI L2 MAC.
740 	 *
741 	 * If this MAC is moving from one Queue to another, no need to change
742 	 * NIG configuration.
743 	 */
744 	if (cmd != BNX2X_VLAN_MAC_MOVE) {
745 		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
746 			bnx2x_set_mac_in_nig(bp, add, mac,
747 					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
748 		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
749 			bnx2x_set_mac_in_nig(bp, add, mac,
750 					     BNX2X_LLH_CAM_ETH_LINE);
751 	}
752 
753 	/* Reset the ramrod data buffer for the first rule */
754 	if (rule_idx == 0)
755 		memset(data, 0, sizeof(*data));
756 
757 	/* Setup a command header */
758 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
759 				      &rule_entry->mac.header);
760 
761 	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
762 	   (add ? "add" : "delete"), mac, raw->cl_id);
763 
764 	/* Set a MAC itself */
765 	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
766 			      &rule_entry->mac.mac_mid,
767 			      &rule_entry->mac.mac_lsb, mac);
768 	rule_entry->mac.inner_mac =
769 		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
770 
771 	/* MOVE: Add a rule that will add this MAC to the target Queue */
772 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
773 		rule_entry++;
774 		rule_cnt++;
775 
776 		/* Setup ramrod data */
777 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
778 					elem->cmd_data.vlan_mac.target_obj,
779 					      true, CLASSIFY_RULE_OPCODE_MAC,
780 					      &rule_entry->mac.header);
781 
782 		/* Set a MAC itself */
783 		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
784 				      &rule_entry->mac.mac_mid,
785 				      &rule_entry->mac.mac_lsb, mac);
786 		rule_entry->mac.inner_mac =
787 			cpu_to_le16(elem->cmd_data.vlan_mac.
788 						u.mac.is_inner_mac);
789 	}
790 
791 	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writes */
794 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
795 					rule_cnt);
796 }
797 
798 /**
799  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
800  *
801  * @bp:		device handle
802  * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
804  * @cam_offset:	offset in cam memory
805  * @hdr:	pointer to a header to setup
806  *
807  * E1/E1H
808  */
809 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
810 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
811 	struct mac_configuration_hdr *hdr)
812 {
813 	struct bnx2x_raw_obj *r = &o->raw;
814 
815 	hdr->length = 1;
816 	hdr->offset = (u8)cam_offset;
817 	hdr->client_id = cpu_to_le16(0xff);
818 	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
819 				(type << BNX2X_SWCID_SHIFT));
820 }
821 
822 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
823 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
824 	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
825 {
826 	struct bnx2x_raw_obj *r = &o->raw;
827 	u32 cl_bit_vec = (1 << r->cl_id);
828 
829 	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
830 	cfg_entry->pf_id = r->func_id;
831 	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
832 
833 	if (add) {
834 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
835 			 T_ETH_MAC_COMMAND_SET);
836 		SET_FLAG(cfg_entry->flags,
837 			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
838 
839 		/* Set a MAC in a ramrod data */
840 		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
841 				      &cfg_entry->middle_mac_addr,
842 				      &cfg_entry->lsb_mac_addr, mac);
843 	} else
844 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
845 			 T_ETH_MAC_COMMAND_INVALIDATE);
846 }
847 
848 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
849 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
850 	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
851 {
852 	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
853 	struct bnx2x_raw_obj *raw = &o->raw;
854 
855 	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
856 					 &config->hdr);
857 	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
858 					 cfg_entry);
859 
860 	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
861 			 (add ? "setting" : "clearing"),
862 			 mac, raw->cl_id, cam_offset);
863 }
864 
865 /**
866  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
867  *
868  * @bp:		device handle
869  * @o:		bnx2x_vlan_mac_obj
870  * @elem:	bnx2x_exeq_elem
871  * @rule_idx:	rule_idx
872  * @cam_offset: cam_offset
873  */
874 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
875 				  struct bnx2x_vlan_mac_obj *o,
876 				  struct bnx2x_exeq_elem *elem, int rule_idx,
877 				  int cam_offset)
878 {
879 	struct bnx2x_raw_obj *raw = &o->raw;
880 	struct mac_configuration_cmd *config =
881 		(struct mac_configuration_cmd *)(raw->rdata);
882 	/*
883 	 * 57710 and 57711 do not support MOVE command,
884 	 * so it's either ADD or DEL
885 	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
888 
889 	/* Reset the ramrod data buffer */
890 	memset(config, 0, sizeof(*config));
891 
892 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
893 				     cam_offset, add,
894 				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
895 				     ETH_VLAN_FILTER_ANY_VLAN, config);
896 }
897 
898 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
899 				  struct bnx2x_vlan_mac_obj *o,
900 				  struct bnx2x_exeq_elem *elem, int rule_idx,
901 				  int cam_offset)
902 {
903 	struct bnx2x_raw_obj *raw = &o->raw;
904 	struct eth_classify_rules_ramrod_data *data =
905 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
906 	int rule_cnt = rule_idx + 1;
907 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
908 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
910 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
911 
912 	/* Reset the ramrod data buffer for the first rule */
913 	if (rule_idx == 0)
914 		memset(data, 0, sizeof(*data));
915 
916 	/* Set a rule header */
917 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
918 				      &rule_entry->vlan.header);
919 
920 	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
921 			 vlan);
922 
923 	/* Set a VLAN itself */
924 	rule_entry->vlan.vlan = cpu_to_le16(vlan);
925 
926 	/* MOVE: Add a rule that will add this MAC to the target Queue */
927 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
928 		rule_entry++;
929 		rule_cnt++;
930 
931 		/* Setup ramrod data */
932 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
933 					elem->cmd_data.vlan_mac.target_obj,
934 					      true, CLASSIFY_RULE_OPCODE_VLAN,
935 					      &rule_entry->vlan.header);
936 
937 		/* Set a VLAN itself */
938 		rule_entry->vlan.vlan = cpu_to_le16(vlan);
939 	}
940 
941 	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writes */
944 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
945 					rule_cnt);
946 }
947 
948 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
949 				      struct bnx2x_vlan_mac_obj *o,
950 				      struct bnx2x_exeq_elem *elem,
951 				      int rule_idx, int cam_offset)
952 {
953 	struct bnx2x_raw_obj *raw = &o->raw;
954 	struct eth_classify_rules_ramrod_data *data =
955 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
956 	int rule_cnt = rule_idx + 1;
957 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
958 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
960 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

964 	/* Reset the ramrod data buffer for the first rule */
965 	if (rule_idx == 0)
966 		memset(data, 0, sizeof(*data));
967 
968 	/* Set a rule header */
969 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
970 				      &rule_entry->pair.header);
971 
	/* Set VLAN and MAC themselves */
973 	rule_entry->pair.vlan = cpu_to_le16(vlan);
974 	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975 			      &rule_entry->pair.mac_mid,
976 			      &rule_entry->pair.mac_lsb, mac);
977 	rule_entry->pair.inner_mac =
978 		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
979 	/* MOVE: Add a rule that will add this MAC to the target Queue */
980 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
981 		rule_entry++;
982 		rule_cnt++;
983 
984 		/* Setup ramrod data */
985 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
986 					elem->cmd_data.vlan_mac.target_obj,
987 					      true, CLASSIFY_RULE_OPCODE_PAIR,
988 					      &rule_entry->pair.header);
989 
990 		/* Set a VLAN itself */
991 		rule_entry->pair.vlan = cpu_to_le16(vlan);
992 		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
993 				      &rule_entry->pair.mac_mid,
994 				      &rule_entry->pair.mac_lsb, mac);
995 		rule_entry->pair.inner_mac =
996 			cpu_to_le16(elem->cmd_data.vlan_mac.u.
997 						vlan_mac.is_inner_mac);
998 	}
999 
1000 	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writes */
1003 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1004 					rule_cnt);
1005 }
1006 
1007 /**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
1009  *
1010  * @bp:		device handle
1011  * @o:		bnx2x_vlan_mac_obj
1012  * @elem:	bnx2x_exeq_elem
1013  * @rule_idx:	rule_idx
1014  * @cam_offset:	cam_offset
1015  */
1016 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1017 				       struct bnx2x_vlan_mac_obj *o,
1018 				       struct bnx2x_exeq_elem *elem,
1019 				       int rule_idx, int cam_offset)
1020 {
1021 	struct bnx2x_raw_obj *raw = &o->raw;
1022 	struct mac_configuration_cmd *config =
1023 		(struct mac_configuration_cmd *)(raw->rdata);
1024 	/*
1025 	 * 57710 and 57711 do not support MOVE command,
1026 	 * so it's either ADD or DEL
1027 	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
1030 
1031 	/* Reset the ramrod data buffer */
1032 	memset(config, 0, sizeof(*config));
1033 
1034 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1035 				     cam_offset, add,
1036 				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1037 				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1038 				     ETH_VLAN_FILTER_CLASSIFY, config);
1039 }
1040 
1041 #define list_next_entry(pos, member) \
1042 	list_entry((pos)->member.next, typeof(*(pos)), member)
1043 
1044 /**
1045  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1046  *
1047  * @bp:		device handle
1048  * @p:		command parameters
1049  * @ppos:	pointer to the cooky
1050  *
1051  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1052  * previously configured elements list.
1053  *
1054  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is	taken
1055  * into an account
1056  *
1057  * pointer to the cooky  - that should be given back in the next call to make
1058  * function handle the next element. If *ppos is set to NULL it will restart the
1059  * iterator. If returned *ppos == NULL this means that the last element has been
1060  * handled.
1061  *
1062  */
1063 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1064 			   struct bnx2x_vlan_mac_ramrod_params *p,
1065 			   struct bnx2x_vlan_mac_registry_elem **ppos)
1066 {
1067 	struct bnx2x_vlan_mac_registry_elem *pos;
1068 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1069 
1070 	/* If list is empty - there is nothing to do here */
1071 	if (list_empty(&o->head)) {
1072 		*ppos = NULL;
1073 		return 0;
1074 	}
1075 
1076 	/* make a step... */
1077 	if (*ppos == NULL)
1078 		*ppos = list_first_entry(&o->head,
1079 					 struct bnx2x_vlan_mac_registry_elem,
1080 					 link);
1081 	else
1082 		*ppos = list_next_entry(*ppos, link);
1083 
1084 	pos = *ppos;
1085 
1086 	/* If it's the last step - return NULL */
1087 	if (list_is_last(&pos->link, &o->head))
1088 		*ppos = NULL;
1089 
1090 	/* Prepare a 'user_req' */
1091 	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1092 
1093 	/* Set the command */
1094 	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1095 
1096 	/* Set vlan_mac_flags */
1097 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1098 
1099 	/* Set a restore bit */
1100 	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1101 
1102 	return bnx2x_config_vlan_mac(bp, p);
1103 }
1104 
1105 /*
1106  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1107  * pointer to an element with a specific criteria and NULL if such an element
1108  * hasn't been found.
1109  */
1110 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1111 	struct bnx2x_exe_queue_obj *o,
1112 	struct bnx2x_exeq_elem *elem)
1113 {
1114 	struct bnx2x_exeq_elem *pos;
1115 	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1116 
1117 	/* Check pending for execution commands */
1118 	list_for_each_entry(pos, &o->exe_queue, link)
1119 		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1120 			      sizeof(*data)) &&
1121 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1122 			return pos;
1123 
1124 	return NULL;
1125 }
1126 
1127 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1128 	struct bnx2x_exe_queue_obj *o,
1129 	struct bnx2x_exeq_elem *elem)
1130 {
1131 	struct bnx2x_exeq_elem *pos;
1132 	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1133 
1134 	/* Check pending for execution commands */
1135 	list_for_each_entry(pos, &o->exe_queue, link)
1136 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1137 			      sizeof(*data)) &&
1138 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1139 			return pos;
1140 
1141 	return NULL;
1142 }
1143 
1144 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1145 	struct bnx2x_exe_queue_obj *o,
1146 	struct bnx2x_exeq_elem *elem)
1147 {
1148 	struct bnx2x_exeq_elem *pos;
1149 	struct bnx2x_vlan_mac_ramrod_data *data =
1150 		&elem->cmd_data.vlan_mac.u.vlan_mac;
1151 
1152 	/* Check pending for execution commands */
1153 	list_for_each_entry(pos, &o->exe_queue, link)
1154 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1155 			      sizeof(*data)) &&
1156 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1157 			return pos;
1158 
1159 	return NULL;
1160 }
1161 
1162 /**
1163  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1164  *
1165  * @bp:		device handle
1166  * @qo:		bnx2x_qable_obj
1167  * @elem:	bnx2x_exeq_elem
1168  *
1169  * Checks that the requested configuration can be added. If yes and if
1170  * requested, consume CAM credit.
1171  *
1172  * The 'validate' is run after the 'optimize'.
1173  *
1174  */
1175 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1176 					      union bnx2x_qable_obj *qo,
1177 					      struct bnx2x_exeq_elem *elem)
1178 {
1179 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1180 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1181 	int rc;
1182 
1183 	/* Check the registry */
1184 	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1185 	if (rc) {
1186 		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1187 		return rc;
1188 	}
1189 
1190 	/*
1191 	 * Check if there is a pending ADD command for this
1192 	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1193 	 */
1194 	if (exeq->get(exeq, elem)) {
1195 		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1196 		return -EEXIST;
1197 	}
1198 
1199 	/*
1200 	 * TODO: Check the pending MOVE from other objects where this
1201 	 * object is a destination object.
1202 	 */
1203 
	/* Consume the credit, unless asked not to */
1205 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1206 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1207 	    o->get_credit(o)))
1208 		return -EINVAL;
1209 
1210 	return 0;
1211 }
1212 
1213 /**
1214  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1215  *
1216  * @bp:		device handle
 * @qo:		queueable object to check
1218  * @elem:	element that needs to be deleted
1219  *
1220  * Checks that the requested configuration can be deleted. If yes and if
1221  * requested, returns a CAM credit.
1222  *
1223  * The 'validate' is run after the 'optimize'.
1224  */
1225 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1226 					      union bnx2x_qable_obj *qo,
1227 					      struct bnx2x_exeq_elem *elem)
1228 {
1229 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1230 	struct bnx2x_vlan_mac_registry_elem *pos;
1231 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1232 	struct bnx2x_exeq_elem query_elem;
1233 
	/* If this classification cannot be deleted (it doesn't exist)
	 * - return -EEXIST.
	 */
1237 	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1238 	if (!pos) {
1239 		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1240 		return -EEXIST;
1241 	}
1242 
1243 	/*
1244 	 * Check if there are pending DEL or MOVE commands for this
1245 	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1246 	 */
1247 	memcpy(&query_elem, elem, sizeof(query_elem));
1248 
1249 	/* Check for MOVE commands */
1250 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1251 	if (exeq->get(exeq, &query_elem)) {
1252 		BNX2X_ERR("There is a pending MOVE command already\n");
1253 		return -EINVAL;
1254 	}
1255 
1256 	/* Check for DEL commands */
1257 	if (exeq->get(exeq, elem)) {
1258 		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1259 		return -EEXIST;
1260 	}
1261 
	/* Return the credit to the credit pool, unless asked not to */
1263 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1264 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1265 	    o->put_credit(o))) {
1266 		BNX2X_ERR("Failed to return a credit\n");
1267 		return -EINVAL;
1268 	}
1269 
1270 	return 0;
1271 }
1272 
1273 /**
1274  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1275  *
1276  * @bp:		device handle
 * @qo:		queueable object to check (source)
1278  * @elem:	element that needs to be moved
1279  *
1280  * Checks that the requested configuration can be moved. If yes and if
1281  * requested, returns a CAM credit.
1282  *
1283  * The 'validate' is run after the 'optimize'.
1284  */
1285 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1286 					       union bnx2x_qable_obj *qo,
1287 					       struct bnx2x_exeq_elem *elem)
1288 {
1289 	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1290 	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1291 	struct bnx2x_exeq_elem query_elem;
1292 	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1293 	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1294 
1295 	/*
1296 	 * Check if we can perform this operation based on the current registry
1297 	 * state.
1298 	 */
1299 	if (!src_o->check_move(bp, src_o, dest_o,
1300 			       &elem->cmd_data.vlan_mac.u)) {
1301 		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1302 		return -EINVAL;
1303 	}
1304 
1305 	/*
1306 	 * Check if there is an already pending DEL or MOVE command for the
1307 	 * source object or ADD command for a destination object. Return an
1308 	 * error if so.
1309 	 */
1310 	memcpy(&query_elem, elem, sizeof(query_elem));
1311 
1312 	/* Check DEL on source */
1313 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1314 	if (src_exeq->get(src_exeq, &query_elem)) {
1315 		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1316 		return -EINVAL;
1317 	}
1318 
1319 	/* Check MOVE on source */
1320 	if (src_exeq->get(src_exeq, elem)) {
1321 		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1322 		return -EEXIST;
1323 	}
1324 
1325 	/* Check ADD on destination */
1326 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1327 	if (dest_exeq->get(dest_exeq, &query_elem)) {
1328 		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1329 		return -EINVAL;
1330 	}
1331 
	/* Consume the credit, unless asked not to */
1333 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1334 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1335 	    dest_o->get_credit(dest_o)))
1336 		return -EINVAL;
1337 
1338 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1339 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1340 	    src_o->put_credit(src_o))) {
1341 		/* return the credit taken from dest... */
1342 		dest_o->put_credit(dest_o);
1343 		return -EINVAL;
1344 	}
1345 
1346 	return 0;
1347 }
1348 
1349 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1350 				   union bnx2x_qable_obj *qo,
1351 				   struct bnx2x_exeq_elem *elem)
1352 {
1353 	switch (elem->cmd_data.vlan_mac.cmd) {
1354 	case BNX2X_VLAN_MAC_ADD:
1355 		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1356 	case BNX2X_VLAN_MAC_DEL:
1357 		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1358 	case BNX2X_VLAN_MAC_MOVE:
1359 		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1360 	default:
1361 		return -EINVAL;
1362 	}
1363 }
1364 
1365 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1366 				  union bnx2x_qable_obj *qo,
1367 				  struct bnx2x_exeq_elem *elem)
1368 {
1369 	int rc = 0;
1370 
1371 	/* If consumption wasn't required, nothing to do */
1372 	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1373 		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
1374 		return 0;
1375 
1376 	switch (elem->cmd_data.vlan_mac.cmd) {
1377 	case BNX2X_VLAN_MAC_ADD:
1378 	case BNX2X_VLAN_MAC_MOVE:
1379 		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1380 		break;
1381 	case BNX2X_VLAN_MAC_DEL:
1382 		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1383 		break;
1384 	default:
1385 		return -EINVAL;
1386 	}
1387 
	if (!rc)
1389 		return -EINVAL;
1390 
1391 	return 0;
1392 }
1393 
1394 /**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1396  *
1397  * @bp:		device handle
1398  * @o:		bnx2x_vlan_mac_obj
1399  *
1400  */
1401 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1402 			       struct bnx2x_vlan_mac_obj *o)
1403 {
1404 	int cnt = 5000, rc;
1405 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1406 	struct bnx2x_raw_obj *raw = &o->raw;
1407 
1408 	while (cnt--) {
1409 		/* Wait for the current command to complete */
1410 		rc = raw->wait_comp(bp, raw);
1411 		if (rc)
1412 			return rc;
1413 
1414 		/* Wait until there are no pending commands */
1415 		if (!bnx2x_exe_queue_empty(exeq))
1416 			usleep_range(1000, 2000);
1417 		else
1418 			return 0;
1419 	}
1420 
1421 	return -EBUSY;
1422 }
1423 
1424 /**
1425  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1426  *
1427  * @bp:		device handle
1428  * @o:		bnx2x_vlan_mac_obj
 * @cqe:	completion element
 * @ramrod_flags:	if RAMROD_CONT is set the next execution chunk
 *			will be scheduled
1431  *
1432  */
1433 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1434 				   struct bnx2x_vlan_mac_obj *o,
1435 				   union event_ring_elem *cqe,
1436 				   unsigned long *ramrod_flags)
1437 {
1438 	struct bnx2x_raw_obj *r = &o->raw;
1439 	int rc;
1440 
1441 	/* Reset pending list */
1442 	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1443 
1444 	/* Clear pending */
1445 	r->clear_pending(r);
1446 
1447 	/* If ramrod failed this is most likely a SW bug */
1448 	if (cqe->message.error)
1449 		return -EINVAL;
1450 
1451 	/* Run the next bulk of pending commands if requested */
1452 	if (test_bit(RAMROD_CONT, ramrod_flags)) {
1453 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1454 		if (rc < 0)
1455 			return rc;
1456 	}
1457 
1458 	/* If there is more work to do return PENDING */
1459 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1460 		return 1;
1461 
1462 	return 0;
1463 }
1464 
1465 /**
1466  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1467  *
1468  * @bp:		device handle
 * @qo:	bnx2x_qable_obj
1470  * @elem:	bnx2x_exeq_elem
1471  */
1472 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1473 				   union bnx2x_qable_obj *qo,
1474 				   struct bnx2x_exeq_elem *elem)
1475 {
1476 	struct bnx2x_exeq_elem query, *pos;
1477 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1478 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1479 
1480 	memcpy(&query, elem, sizeof(query));
1481 
1482 	switch (elem->cmd_data.vlan_mac.cmd) {
1483 	case BNX2X_VLAN_MAC_ADD:
1484 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1485 		break;
1486 	case BNX2X_VLAN_MAC_DEL:
1487 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1488 		break;
1489 	default:
1490 		/* Don't handle anything other than ADD or DEL */
1491 		return 0;
1492 	}
1493 
1494 	/* If we found the appropriate element - delete it */
1495 	pos = exeq->get(exeq, &query);
1496 	if (pos) {
1497 
1498 		/* Return the credit of the optimized command */
1499 		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1500 			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1501 			if ((query.cmd_data.vlan_mac.cmd ==
1502 			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1503 				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1504 				return -EINVAL;
1505 			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1506 				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1507 				return -EINVAL;
1508 			}
1509 		}
1510 
1511 		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1512 			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1513 			   "ADD" : "DEL");
1514 
1515 		list_del(&pos->link);
1516 		bnx2x_exe_queue_free_elem(bp, pos);
1517 		return 1;
1518 	}
1519 
1520 	return 0;
1521 }
1522 
1523 /**
1524  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1525  *
1526  * @bp:	  device handle
1527  * @o:
1528  * @elem:
1529  * @restore:
1530  * @re:
1531  *
1532  * prepare a registry element according to the current command request.
1533  */
1534 static inline int bnx2x_vlan_mac_get_registry_elem(
1535 	struct bnx2x *bp,
1536 	struct bnx2x_vlan_mac_obj *o,
1537 	struct bnx2x_exeq_elem *elem,
1538 	bool restore,
1539 	struct bnx2x_vlan_mac_registry_elem **re)
1540 {
1541 	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1542 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1543 
1544 	/* Allocate a new registry element if needed. */
1545 	if (!restore &&
1546 	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1547 		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1548 		if (!reg_elem)
1549 			return -ENOMEM;
1550 
1551 		/* Get a new CAM offset */
1552 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1553 			/*
1554 			 * This shell never happen, because we have checked the
1555 			 * CAM availiability in the 'validate'.
1556 			 */
1557 			WARN_ON(1);
1558 			kfree(reg_elem);
1559 			return -EINVAL;
1560 		}
1561 
1562 		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1563 
1564 		/* Set a VLAN-MAC data */
1565 		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1566 			  sizeof(reg_elem->u));
1567 
1568 		/* Copy the flags (needed for DEL and RESTORE flows) */
1569 		reg_elem->vlan_mac_flags =
1570 			elem->cmd_data.vlan_mac.vlan_mac_flags;
1571 	} else /* DEL, RESTORE */
1572 		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1573 
1574 	*re = reg_elem;
1575 	return 0;
1576 }
1577 
1578 /**
1579  * bnx2x_execute_vlan_mac - execute vlan mac command
1580  *
1581  * @bp:			device handle
1582  * @qo:
1583  * @exe_chunk:
1584  * @ramrod_flags:
1585  *
1586  * go and send a ramrod!
1587  */
1588 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1589 				  union bnx2x_qable_obj *qo,
1590 				  struct list_head *exe_chunk,
1591 				  unsigned long *ramrod_flags)
1592 {
1593 	struct bnx2x_exeq_elem *elem;
1594 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1595 	struct bnx2x_raw_obj *r = &o->raw;
1596 	int rc, idx = 0;
1597 	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1598 	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1599 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1600 	enum bnx2x_vlan_mac_cmd cmd;
1601 
1602 	/*
1603 	 * If DRIVER_ONLY execution is requested, cleanup a registry
1604 	 * and exit. Otherwise send a ramrod to FW.
1605 	 */
1606 	if (!drv_only) {
1607 		WARN_ON(r->check_pending(r));
1608 
1609 		/* Set pending */
1610 		r->set_pending(r);
1611 
		/* Fill the ramrod data */
1613 		list_for_each_entry(elem, exe_chunk, link) {
1614 			cmd = elem->cmd_data.vlan_mac.cmd;
1615 			/*
1616 			 * We will add to the target object in MOVE command, so
1617 			 * change the object for a CAM search.
1618 			 */
1619 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1620 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1621 			else
1622 				cam_obj = o;
1623 
1624 			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1625 							      elem, restore,
1626 							      &reg_elem);
1627 			if (rc)
1628 				goto error_exit;
1629 
1630 			WARN_ON(!reg_elem);
1631 
1632 			/* Push a new entry into the registry */
1633 			if (!restore &&
1634 			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1635 			    (cmd == BNX2X_VLAN_MAC_MOVE)))
1636 				list_add(&reg_elem->link, &cam_obj->head);
1637 
1638 			/* Configure a single command in a ramrod data buffer */
1639 			o->set_one_rule(bp, o, elem, idx,
1640 					reg_elem->cam_offset);
1641 
1642 			/* MOVE command consumes 2 entries in the ramrod data */
1643 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1644 				idx += 2;
1645 			else
1646 				idx++;
1647 		}
1648 
1649 		/*
1650 		 *  No need for an explicit memory barrier here as long we would
1651 		 *  need to ensure the ordering of writing to the SPQ element
1652 		 *  and updating of the SPQ producer which involves a memory
1653 		 *  read and we will have to put a full memory barrier there
1654 		 *  (inside bnx2x_sp_post()).
1655 		 */
1656 
1657 		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1658 				   U64_HI(r->rdata_mapping),
1659 				   U64_LO(r->rdata_mapping),
1660 				   ETH_CONNECTION_TYPE);
1661 		if (rc)
1662 			goto error_exit;
1663 	}
1664 
1665 	/* Now, when we are done with the ramrod - clean up the registry */
1666 	list_for_each_entry(elem, exe_chunk, link) {
1667 		cmd = elem->cmd_data.vlan_mac.cmd;
1668 		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1669 		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1670 			reg_elem = o->check_del(bp, o,
1671 						&elem->cmd_data.vlan_mac.u);
1672 
1673 			WARN_ON(!reg_elem);
1674 
1675 			o->put_cam_offset(o, reg_elem->cam_offset);
1676 			list_del(&reg_elem->link);
1677 			kfree(reg_elem);
1678 		}
1679 	}
1680 
	if (!drv_only)
		return 1;

	return 0;
1685 
1686 error_exit:
1687 	r->clear_pending(r);
1688 
1689 	/* Cleanup a registry in case of a failure */
1690 	list_for_each_entry(elem, exe_chunk, link) {
1691 		cmd = elem->cmd_data.vlan_mac.cmd;
1692 
1693 		if (cmd == BNX2X_VLAN_MAC_MOVE)
1694 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1695 		else
1696 			cam_obj = o;
1697 
		/* Delete all the entries newly added above */
1699 		if (!restore &&
1700 		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1701 		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1702 			reg_elem = o->check_del(bp, cam_obj,
1703 						&elem->cmd_data.vlan_mac.u);
1704 			if (reg_elem) {
1705 				list_del(&reg_elem->link);
1706 				kfree(reg_elem);
1707 			}
1708 		}
1709 	}
1710 
1711 	return rc;
1712 }
1713 
1714 static inline int bnx2x_vlan_mac_push_new_cmd(
1715 	struct bnx2x *bp,
1716 	struct bnx2x_vlan_mac_ramrod_params *p)
1717 {
1718 	struct bnx2x_exeq_elem *elem;
1719 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1720 	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1721 
1722 	/* Allocate the execution queue element */
1723 	elem = bnx2x_exe_queue_alloc_elem(bp);
1724 	if (!elem)
1725 		return -ENOMEM;
1726 
1727 	/* Set the command 'length' */
1728 	switch (p->user_req.cmd) {
1729 	case BNX2X_VLAN_MAC_MOVE:
1730 		elem->cmd_len = 2;
1731 		break;
1732 	default:
1733 		elem->cmd_len = 1;
1734 	}
1735 
1736 	/* Fill the object specific info */
1737 	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1738 
1739 	/* Try to add a new command to the pending list */
1740 	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1741 }
1742 
1743 /**
1744  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1745  *
1746  * @bp:	  device handle
 * @p:	  command parameters
1748  *
1749  */
1750 int bnx2x_config_vlan_mac(
1751 	struct bnx2x *bp,
1752 	struct bnx2x_vlan_mac_ramrod_params *p)
1753 {
1754 	int rc = 0;
1755 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1756 	unsigned long *ramrod_flags = &p->ramrod_flags;
1757 	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1758 	struct bnx2x_raw_obj *raw = &o->raw;
1759 
1760 	/*
1761 	 * Add new elements to the execution list for commands that require it.
1762 	 */
1763 	if (!cont) {
1764 		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1765 		if (rc)
1766 			return rc;
1767 	}
1768 
1769 	/*
1770 	 * If nothing will be executed further in this iteration we want to
1771 	 * return PENDING if there are pending commands
1772 	 */
1773 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1774 		rc = 1;
1775 
1776 	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1777 		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1778 		raw->clear_pending(raw);
1779 	}
1780 
1781 	/* Execute commands if required */
1782 	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1783 	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1784 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1785 		if (rc < 0)
1786 			return rc;
1787 	}
1788 
1789 	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
1792 	 */
1793 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1794 		/*
1795 		 * Wait maximum for the current exe_queue length iterations plus
1796 		 * one (for the current pending command).
1797 		 */
1798 		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1799 
1800 		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1801 		       max_iterations--) {
1802 
1803 			/* Wait for the current command to complete */
1804 			rc = raw->wait_comp(bp, raw);
1805 			if (rc)
1806 				return rc;
1807 
1808 			/* Make a next step */
1809 			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1810 						  ramrod_flags);
1811 			if (rc < 0)
1812 				return rc;
1813 		}
1814 
1815 		return 0;
1816 	}
1817 
1818 	return rc;
1819 }
1820 
1821 
1822 
1823 /**
1824  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1825  *
1826  * @bp:			device handle
1827  * @o:
1828  * @vlan_mac_flags:
1829  * @ramrod_flags:	execution flags to be used for this deletion
1830  *
1831  * if the last operation has completed successfully and there are no
1832  * moreelements left, positive value if the last operation has completed
1833  * successfully and there are more previously configured elements, negative
1834  * value is current operation has failed.
1835  */
1836 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1837 				  struct bnx2x_vlan_mac_obj *o,
1838 				  unsigned long *vlan_mac_flags,
1839 				  unsigned long *ramrod_flags)
1840 {
1841 	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1842 	int rc = 0;
1843 	struct bnx2x_vlan_mac_ramrod_params p;
1844 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1845 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1846 
1847 	/* Clear pending commands first */
1848 
1849 	spin_lock_bh(&exeq->lock);
1850 
1851 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1852 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1853 		    *vlan_mac_flags) {
1854 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
1855 			if (rc) {
1856 				BNX2X_ERR("Failed to remove command\n");
1857 				spin_unlock_bh(&exeq->lock);
1858 				return rc;
1859 			}
1860 			list_del(&exeq_pos->link);
1861 			bnx2x_exe_queue_free_elem(bp, exeq_pos);
1862 		}
1863 	}
1864 
1865 	spin_unlock_bh(&exeq->lock);
1866 
1867 	/* Prepare a command request */
1868 	memset(&p, 0, sizeof(p));
1869 	p.vlan_mac_obj = o;
1870 	p.ramrod_flags = *ramrod_flags;
1871 	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1872 
	/* Add the matching VLAN-MAC entries to the execution queue without
	 * actually executing anything; the final call below (with RAMROD_CONT
	 * set) kicks off the execution.
	 */
1877 	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1878 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1879 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
1880 
1881 	list_for_each_entry(pos, &o->head, link) {
1882 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
1883 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1884 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1885 			rc = bnx2x_config_vlan_mac(bp, &p);
1886 			if (rc < 0) {
1887 				BNX2X_ERR("Failed to add a new DEL command\n");
1888 				return rc;
1889 			}
1890 		}
1891 	}
1892 
1893 	p.ramrod_flags = *ramrod_flags;
1894 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
1895 
1896 	return bnx2x_config_vlan_mac(bp, &p);
1897 }
1898 
1899 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1900 	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1901 	unsigned long *pstate, bnx2x_obj_type type)
1902 {
1903 	raw->func_id = func_id;
1904 	raw->cid = cid;
1905 	raw->cl_id = cl_id;
1906 	raw->rdata = rdata;
1907 	raw->rdata_mapping = rdata_mapping;
1908 	raw->state = state;
1909 	raw->pstate = pstate;
1910 	raw->obj_type = type;
1911 	raw->check_pending = bnx2x_raw_check_pending;
1912 	raw->clear_pending = bnx2x_raw_clear_pending;
1913 	raw->set_pending = bnx2x_raw_set_pending;
1914 	raw->wait_comp = bnx2x_raw_wait;
1915 }
1916 
1917 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1918 	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1919 	int state, unsigned long *pstate, bnx2x_obj_type type,
1920 	struct bnx2x_credit_pool_obj *macs_pool,
1921 	struct bnx2x_credit_pool_obj *vlans_pool)
1922 {
1923 	INIT_LIST_HEAD(&o->head);
1924 
1925 	o->macs_pool = macs_pool;
1926 	o->vlans_pool = vlans_pool;
1927 
1928 	o->delete_all = bnx2x_vlan_mac_del_all;
1929 	o->restore = bnx2x_vlan_mac_restore;
1930 	o->complete = bnx2x_complete_vlan_mac;
1931 	o->wait = bnx2x_wait_vlan_mac;
1932 
1933 	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1934 			   state, pstate, type);
1935 }
1936 
1938 void bnx2x_init_mac_obj(struct bnx2x *bp,
1939 			struct bnx2x_vlan_mac_obj *mac_obj,
1940 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
1941 			dma_addr_t rdata_mapping, int state,
1942 			unsigned long *pstate, bnx2x_obj_type type,
1943 			struct bnx2x_credit_pool_obj *macs_pool)
1944 {
1945 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1946 
1947 	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1948 				   rdata_mapping, state, pstate, type,
1949 				   macs_pool, NULL);
1950 
1951 	/* CAM credit pool handling */
1952 	mac_obj->get_credit = bnx2x_get_credit_mac;
1953 	mac_obj->put_credit = bnx2x_put_credit_mac;
1954 	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1955 	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1956 
1957 	if (CHIP_IS_E1x(bp)) {
1958 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1959 		mac_obj->check_del         = bnx2x_check_mac_del;
1960 		mac_obj->check_add         = bnx2x_check_mac_add;
1961 		mac_obj->check_move        = bnx2x_check_move_always_err;
1962 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1963 
1964 		/* Exe Queue */
1965 		bnx2x_exe_queue_init(bp,
1966 				     &mac_obj->exe_queue, 1, qable_obj,
1967 				     bnx2x_validate_vlan_mac,
1968 				     bnx2x_remove_vlan_mac,
1969 				     bnx2x_optimize_vlan_mac,
1970 				     bnx2x_execute_vlan_mac,
1971 				     bnx2x_exeq_get_mac);
1972 	} else {
1973 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1974 		mac_obj->check_del         = bnx2x_check_mac_del;
1975 		mac_obj->check_add         = bnx2x_check_mac_add;
1976 		mac_obj->check_move        = bnx2x_check_move;
1977 		mac_obj->ramrod_cmd        =
1978 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1979 		mac_obj->get_n_elements    = bnx2x_get_n_elements;
1980 
1981 		/* Exe Queue */
1982 		bnx2x_exe_queue_init(bp,
1983 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1984 				     qable_obj, bnx2x_validate_vlan_mac,
1985 				     bnx2x_remove_vlan_mac,
1986 				     bnx2x_optimize_vlan_mac,
1987 				     bnx2x_execute_vlan_mac,
1988 				     bnx2x_exeq_get_mac);
1989 	}
1990 }
1991 
1992 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1993 			 struct bnx2x_vlan_mac_obj *vlan_obj,
1994 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1995 			 dma_addr_t rdata_mapping, int state,
1996 			 unsigned long *pstate, bnx2x_obj_type type,
1997 			 struct bnx2x_credit_pool_obj *vlans_pool)
1998 {
1999 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2000 
2001 	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2002 				   rdata_mapping, state, pstate, type, NULL,
2003 				   vlans_pool);
2004 
2005 	vlan_obj->get_credit = bnx2x_get_credit_vlan;
2006 	vlan_obj->put_credit = bnx2x_put_credit_vlan;
2007 	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2008 	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2009 
2010 	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chips other than E2 and newer are not supported\n");
2012 		BUG();
2013 	} else {
2014 		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2015 		vlan_obj->check_del         = bnx2x_check_vlan_del;
2016 		vlan_obj->check_add         = bnx2x_check_vlan_add;
2017 		vlan_obj->check_move        = bnx2x_check_move;
2018 		vlan_obj->ramrod_cmd        =
2019 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2020 		vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2021 
2022 		/* Exe Queue */
2023 		bnx2x_exe_queue_init(bp,
2024 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2025 				     qable_obj, bnx2x_validate_vlan_mac,
2026 				     bnx2x_remove_vlan_mac,
2027 				     bnx2x_optimize_vlan_mac,
2028 				     bnx2x_execute_vlan_mac,
2029 				     bnx2x_exeq_get_vlan);
2030 	}
2031 }
2032 
2033 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2034 			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2035 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
2036 			     dma_addr_t rdata_mapping, int state,
2037 			     unsigned long *pstate, bnx2x_obj_type type,
2038 			     struct bnx2x_credit_pool_obj *macs_pool,
2039 			     struct bnx2x_credit_pool_obj *vlans_pool)
2040 {
2041 	union bnx2x_qable_obj *qable_obj =
2042 		(union bnx2x_qable_obj *)vlan_mac_obj;
2043 
2044 	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2045 				   rdata_mapping, state, pstate, type,
2046 				   macs_pool, vlans_pool);
2047 
2048 	/* CAM pool handling */
2049 	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2050 	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2051 	/*
2052 	 * CAM offset is relevant for 57710 and 57711 chips only which have a
2053 	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2054 	 * will be taken from MACs' pool object only.
2055 	 */
2056 	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2057 	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2058 
2059 	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("57710 (E1) is not supported\n");
2061 		BUG();
2062 	} else if (CHIP_IS_E1H(bp)) {
2063 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2064 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2065 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2066 		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2067 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2068 
2069 		/* Exe Queue */
2070 		bnx2x_exe_queue_init(bp,
2071 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2072 				     bnx2x_validate_vlan_mac,
2073 				     bnx2x_remove_vlan_mac,
2074 				     bnx2x_optimize_vlan_mac,
2075 				     bnx2x_execute_vlan_mac,
2076 				     bnx2x_exeq_get_vlan_mac);
2077 	} else {
2078 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2079 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2080 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2081 		vlan_mac_obj->check_move        = bnx2x_check_move;
2082 		vlan_mac_obj->ramrod_cmd        =
2083 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2084 
2085 		/* Exe Queue */
2086 		bnx2x_exe_queue_init(bp,
2087 				     &vlan_mac_obj->exe_queue,
2088 				     CLASSIFY_RULES_COUNT,
2089 				     qable_obj, bnx2x_validate_vlan_mac,
2090 				     bnx2x_remove_vlan_mac,
2091 				     bnx2x_optimize_vlan_mac,
2092 				     bnx2x_execute_vlan_mac,
2093 				     bnx2x_exeq_get_vlan_mac);
2094 	}
2096 }
2097 
2098 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2099 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2100 			struct tstorm_eth_mac_filter_config *mac_filters,
2101 			u16 pf_id)
2102 {
2103 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2104 
2105 	u32 addr = BAR_TSTRORM_INTMEM +
2106 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2107 
2108 	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2109 }
2110 
2111 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2112 				 struct bnx2x_rx_mode_ramrod_params *p)
2113 {
2114 	/* update the bp MAC filter structure */
2115 	u32 mask = (1 << p->cl_id);
2116 
2117 	struct tstorm_eth_mac_filter_config *mac_filters =
2118 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2119 
	/* initial setting is drop-all */
2121 	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2122 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2123 	u8 unmatched_unicast = 0;
2124 
	/* In E1x we only take the Rx accept flags into account since Tx
	 * switching isn't enabled.
	 */
2127 	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2128 		/* accept matched ucast */
2129 		drop_all_ucast = 0;
2130 
2131 	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2132 		/* accept matched mcast */
2133 		drop_all_mcast = 0;
2134 
2135 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
2137 		drop_all_ucast = 0;
2138 		accp_all_ucast = 1;
2139 	}
2140 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2141 		/* accept all mcast */
2142 		drop_all_mcast = 0;
2143 		accp_all_mcast = 1;
2144 	}
2145 	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2146 		/* accept (all) bcast */
2147 		accp_all_bcast = 1;
2148 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2149 		/* accept unmatched unicasts */
2150 		unmatched_unicast = 1;
2151 
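	/* Each filter field below holds one bit per client and 'mask'
	 * selects this client's bit: e.g. cl_id == 3 gives mask == 0x08,
	 * so the "flag ? field | mask : field & ~mask" pattern sets or
	 * clears only bit 3 and leaves other clients' settings intact.
	 */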
2152 	mac_filters->ucast_drop_all = drop_all_ucast ?
2153 		mac_filters->ucast_drop_all | mask :
2154 		mac_filters->ucast_drop_all & ~mask;
2155 
2156 	mac_filters->mcast_drop_all = drop_all_mcast ?
2157 		mac_filters->mcast_drop_all | mask :
2158 		mac_filters->mcast_drop_all & ~mask;
2159 
2160 	mac_filters->ucast_accept_all = accp_all_ucast ?
2161 		mac_filters->ucast_accept_all | mask :
2162 		mac_filters->ucast_accept_all & ~mask;
2163 
2164 	mac_filters->mcast_accept_all = accp_all_mcast ?
2165 		mac_filters->mcast_accept_all | mask :
2166 		mac_filters->mcast_accept_all & ~mask;
2167 
2168 	mac_filters->bcast_accept_all = accp_all_bcast ?
2169 		mac_filters->bcast_accept_all | mask :
2170 		mac_filters->bcast_accept_all & ~mask;
2171 
2172 	mac_filters->unmatched_unicast = unmatched_unicast ?
2173 		mac_filters->unmatched_unicast | mask :
2174 		mac_filters->unmatched_unicast & ~mask;
2175 
	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2177 			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2178 	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2179 	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2180 	   mac_filters->bcast_accept_all);
2181 
	/* write the MAC filter structure */
2183 	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2184 
2185 	/* The operation is completed */
2186 	clear_bit(p->state, p->pstate);
2187 	smp_mb__after_clear_bit();
2188 
2189 	return 0;
2190 }
2191 
2192 /* Setup ramrod data */
2193 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2194 				struct eth_classify_header *hdr,
2195 				u8 rule_cnt)
2196 {
2197 	hdr->echo = cpu_to_le32(cid);
2198 	hdr->rule_cnt = rule_cnt;
2199 }
2200 
2201 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2202 				unsigned long *accept_flags,
2203 				struct eth_filter_rules_cmd *cmd,
2204 				bool clear_accept_all)
2205 {
2206 	u16 state;
2207 
2208 	/* start with 'drop-all' */
2209 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2210 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2211 
2212 	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2213 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2214 
2215 	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2216 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2217 
2218 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2219 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2220 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2221 	}
2222 
2223 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2224 		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2225 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2226 	}
2227 
2228 	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2229 		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2230 
2231 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2232 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2233 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2234 	}
2235 
2236 	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2237 		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2238 
2239 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2240 	if (clear_accept_all) {
2241 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2242 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2243 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2244 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2245 	}
2246 
2247 	cmd->state = cpu_to_le16(state);
2249 }
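
/* For example: an "accept everything" request with BNX2X_ACCEPT_ALL_UNICAST,
 * BNX2X_ACCEPT_ALL_MULTICAST and BNX2X_ACCEPT_BROADCAST set ends up with both
 * DROP_ALL bits cleared and UCAST_ACCEPT_ALL, MCAST_ACCEPT_ALL and
 * BCAST_ACCEPT_ALL set in cmd->state.
 */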
2250 
2251 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2252 				struct bnx2x_rx_mode_ramrod_params *p)
2253 {
2254 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2255 	int rc;
2256 	u8 rule_idx = 0;
2257 
2258 	/* Reset the ramrod data buffer */
2259 	memset(data, 0, sizeof(*data));
2260 
2261 	/* Setup ramrod data */
2262 
2263 	/* Tx (internal switching) */
2264 	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2265 		data->rules[rule_idx].client_id = p->cl_id;
2266 		data->rules[rule_idx].func_id = p->func_id;
2267 
2268 		data->rules[rule_idx].cmd_general_data =
2269 			ETH_FILTER_RULES_CMD_TX_CMD;
2270 
2271 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2272 					       &(data->rules[rule_idx++]),
2273 					       false);
2274 	}
2275 
2276 	/* Rx */
2277 	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2278 		data->rules[rule_idx].client_id = p->cl_id;
2279 		data->rules[rule_idx].func_id = p->func_id;
2280 
2281 		data->rules[rule_idx].cmd_general_data =
2282 			ETH_FILTER_RULES_CMD_RX_CMD;
2283 
2284 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2285 					       &(data->rules[rule_idx++]),
2286 					       false);
2287 	}
2288 
	/*
	 * If FCoE Queue configuration has been requested, configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
2297 	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2298 		/*  Tx (internal switching) */
2299 		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2300 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2301 			data->rules[rule_idx].func_id = p->func_id;
2302 
2303 			data->rules[rule_idx].cmd_general_data =
2304 						ETH_FILTER_RULES_CMD_TX_CMD;
2305 
2306 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2307 						       &(data->rules[rule_idx]),
2308 						       true);
2309 			rule_idx++;
2310 		}
2311 
2312 		/* Rx */
2313 		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2314 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2315 			data->rules[rule_idx].func_id = p->func_id;
2316 
2317 			data->rules[rule_idx].cmd_general_data =
2318 						ETH_FILTER_RULES_CMD_RX_CMD;
2319 
2320 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2321 						       &(data->rules[rule_idx]),
2322 						       true);
2323 			rule_idx++;
2324 		}
2325 	}
2326 
2327 	/*
2328 	 * Set the ramrod header (most importantly - number of rules to
2329 	 * configure).
2330 	 */
2331 	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2332 
2333 	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2334 			 data->header.rule_cnt, p->rx_accept_flags,
2335 			 p->tx_accept_flags);
2336 
	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer, which involves a memory
	 * read; if the memory read were removed we would have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
2344 
2345 	/* Send a ramrod */
2346 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2347 			   U64_HI(p->rdata_mapping),
2348 			   U64_LO(p->rdata_mapping),
2349 			   ETH_CONNECTION_TYPE);
2350 	if (rc)
2351 		return rc;
2352 
2353 	/* Ramrod completion is pending */
2354 	return 1;
2355 }
2356 
2357 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2358 				      struct bnx2x_rx_mode_ramrod_params *p)
2359 {
2360 	return bnx2x_state_wait(bp, p->state, p->pstate);
2361 }
2362 
2363 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2364 				    struct bnx2x_rx_mode_ramrod_params *p)
2365 {
2366 	/* Do nothing */
2367 	return 0;
2368 }
2369 
2370 int bnx2x_config_rx_mode(struct bnx2x *bp,
2371 			 struct bnx2x_rx_mode_ramrod_params *p)
2372 {
2373 	int rc;
2374 
2375 	/* Configure the new classification in the chip */
2376 	rc = p->rx_mode_obj->config_rx_mode(bp, p);
2377 	if (rc < 0)
2378 		return rc;
2379 
2380 	/* Wait for a ramrod completion if was requested */
2381 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2382 		rc = p->rx_mode_obj->wait_comp(bp, p);
2383 		if (rc)
2384 			return rc;
2385 	}
2386 
2387 	return rc;
2388 }
2389 
2390 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2391 			    struct bnx2x_rx_mode_obj *o)
2392 {
2393 	if (CHIP_IS_E1x(bp)) {
2394 		o->wait_comp      = bnx2x_empty_rx_mode_wait;
2395 		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2396 	} else {
2397 		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2398 		o->config_rx_mode = bnx2x_set_rx_mode_e2;
2399 	}
2400 }
2401 
2402 /********************* Multicast verbs: SET, CLEAR ****************************/
2403 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2404 {
2405 	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2406 }
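
/* The bin is the top byte of the little-endian CRC32c of the 6-byte MAC,
 * giving 256 possible bins: e.g. a (hypothetical) MAC whose crc32c_le()
 * value is 0x8a123456 falls into bin 0x8a == 138.
 */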
2407 
2408 struct bnx2x_mcast_mac_elem {
2409 	struct list_head link;
2410 	u8 mac[ETH_ALEN];
2411 	u8 pad[2]; /* For a natural alignment of the following buffer */
2412 };
2413 
2414 struct bnx2x_pending_mcast_cmd {
2415 	struct list_head link;
2416 	int type; /* BNX2X_MCAST_CMD_X */
2417 	union {
2418 		struct list_head macs_head;
2419 		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with approx. match */
2421 	} data;
2422 
2423 	bool done; /* set to true, when the command has been handled,
2424 		    * practically used in 57712 handling only, where one pending
2425 		    * command may be handled in a few operations. As long as for
2426 		    * other chips every operation handling is completed in a
2427 		    * single ramrod, there is no need to utilize this field.
2428 		    */
2429 };
2430 
2431 static int bnx2x_mcast_wait(struct bnx2x *bp,
2432 			    struct bnx2x_mcast_obj *o)
2433 {
2434 	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2435 			o->raw.wait_comp(bp, &o->raw))
2436 		return -EBUSY;
2437 
2438 	return 0;
2439 }
2440 
2441 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2442 				   struct bnx2x_mcast_obj *o,
2443 				   struct bnx2x_mcast_ramrod_params *p,
2444 				   enum bnx2x_mcast_cmd cmd)
2445 {
2446 	int total_sz;
2447 	struct bnx2x_pending_mcast_cmd *new_cmd;
2448 	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2449 	struct bnx2x_mcast_list_elem *pos;
2450 	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2451 			     p->mcast_list_len : 0);
2452 
	/* If the command is empty ("handle pending commands only"), return */
2454 	if (!p->mcast_list_len)
2455 		return 0;
2456 
2457 	total_sz = sizeof(*new_cmd) +
2458 		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2459 
2460 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2461 	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2462 
2463 	if (!new_cmd)
2464 		return -ENOMEM;
2465 
2466 	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2467 	   cmd, macs_list_len);
2468 
2469 	INIT_LIST_HEAD(&new_cmd->data.macs_head);
2470 
2471 	new_cmd->type = cmd;
2472 	new_cmd->done = false;
2473 
2474 	switch (cmd) {
2475 	case BNX2X_MCAST_CMD_ADD:
2476 		cur_mac = (struct bnx2x_mcast_mac_elem *)
2477 			  ((u8 *)new_cmd + sizeof(*new_cmd));
2478 
		/* Push the MACs of the current command into the pending command
2480 		 * MACs list: FIFO
2481 		 */
2482 		list_for_each_entry(pos, &p->mcast_list, link) {
2483 			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2484 			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2485 			cur_mac++;
2486 		}
2487 
2488 		break;
2489 
2490 	case BNX2X_MCAST_CMD_DEL:
2491 		new_cmd->data.macs_num = p->mcast_list_len;
2492 		break;
2493 
2494 	case BNX2X_MCAST_CMD_RESTORE:
2495 		new_cmd->data.next_bin = 0;
2496 		break;
2497 
2498 	default:
2499 		kfree(new_cmd);
2500 		BNX2X_ERR("Unknown command: %d\n", cmd);
2501 		return -EINVAL;
2502 	}
2503 
2504 	/* Push the new pending command to the tail of the pending list: FIFO */
2505 	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2506 
2507 	o->set_sched(o);
2508 
2509 	return 1;
2510 }
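
/* The single kzalloc() in bnx2x_mcast_enqueue_cmd() produces the following
 * layout, with cur_mac initially pointing just past the command header:
 *
 *	+-------------------------+----------+----------+----
 *	| bnx2x_pending_mcast_cmd | mac_elem | mac_elem | ...
 *	+-------------------------+----------+----------+----
 *
 * so freeing the command later releases its MAC elements as well.
 */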
2511 
/**
 * bnx2x_mcast_get_next_bin - get the next set bin (index)
 *
 * @o:		multicast object
 * @last:	index to start looking from (inclusive)
 *
 * Returns the next found (set) bin or a negative value if none is found.
 */
2520 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2521 {
2522 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2523 
2524 	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2525 		if (o->registry.aprox_match.vec[i])
2526 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2527 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2528 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2529 						       vec, cur_bit)) {
2530 					return cur_bit;
2531 				}
2532 			}
2533 		inner_start = 0;
2534 	}
2535 
2536 	/* None found */
2537 	return -1;
2538 }
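
/* A worked example, assuming BIT_VEC64_ELEM_SZ is 64: with last == 70 the
 * scan starts at vec[1] (70 / 64 == 1) from bit 6 within that element
 * (70 % 64 == 6), i.e. from bin 70 itself; every subsequent element is
 * then scanned from bit 0.
 */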
2539 
/**
 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
 *
 * @o:		multicast object
 *
 * Returns the index of the found bin or -1 if none is found.
 */
2547 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2548 {
2549 	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2550 
2551 	if (cur_bit >= 0)
2552 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2553 
2554 	return cur_bit;
2555 }
2556 
2557 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2558 {
2559 	struct bnx2x_raw_obj *raw = &o->raw;
2560 	u8 rx_tx_flag = 0;
2561 
2562 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2563 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2564 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2565 
2566 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2567 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2568 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2569 
2570 	return rx_tx_flag;
2571 }
2572 
2573 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2574 					struct bnx2x_mcast_obj *o, int idx,
2575 					union bnx2x_mcast_config_data *cfg_data,
2576 					enum bnx2x_mcast_cmd cmd)
2577 {
2578 	struct bnx2x_raw_obj *r = &o->raw;
2579 	struct eth_multicast_rules_ramrod_data *data =
2580 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2581 	u8 func_id = r->func_id;
2582 	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2583 	int bin;
2584 
2585 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2586 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2587 
2588 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2589 
2590 	/* Get a bin and update a bins' vector */
2591 	switch (cmd) {
2592 	case BNX2X_MCAST_CMD_ADD:
2593 		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2594 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2595 		break;
2596 
2597 	case BNX2X_MCAST_CMD_DEL:
2598 		/* If there were no more bins to clear
2599 		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2600 		 * clear any (0xff) bin.
2601 		 * See bnx2x_mcast_validate_e2() for explanation when it may
2602 		 * happen.
2603 		 */
2604 		bin = bnx2x_mcast_clear_first_bin(o);
2605 		break;
2606 
2607 	case BNX2X_MCAST_CMD_RESTORE:
2608 		bin = cfg_data->bin;
2609 		break;
2610 
2611 	default:
2612 		BNX2X_ERR("Unknown command: %d\n", cmd);
2613 		return;
2614 	}
2615 
2616 	DP(BNX2X_MSG_SP, "%s bin %d\n",
2617 			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2618 			 "Setting"  : "Clearing"), bin);
2619 
2620 	data->rules[idx].bin_id    = (u8)bin;
2621 	data->rules[idx].func_id   = func_id;
2622 	data->rules[idx].engine_id = o->engine_id;
2623 }
2624 
/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 *
 * @bp:		device handle
 * @o:		multicast object
 * @start_bin:	index in the registry to start from (inclusive)
 * @rdata_idx:	index in the ramrod data to start from
 *
 * Returns the last handled bin index or -1 if all bins have been handled.
 */
2635 static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2637 	int *rdata_idx)
2638 {
2639 	int cur_bin, cnt = *rdata_idx;
2640 	union bnx2x_mcast_config_data cfg_data = {NULL};
2641 
2642 	/* go through the registry and configure the bins from it */
2643 	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2644 	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2645 
2646 		cfg_data.bin = (u8)cur_bin;
2647 		o->set_one_rule(bp, o, cnt, &cfg_data,
2648 				BNX2X_MCAST_CMD_RESTORE);
2649 
2650 		cnt++;
2651 
2652 		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2653 
2654 		/* Break if we reached the maximum number
2655 		 * of rules.
2656 		 */
2657 		if (cnt >= o->max_cmd_len)
2658 			break;
2659 	}
2660 
2661 	*rdata_idx = cnt;
2662 
2663 	return cur_bin;
2664 }
2665 
2666 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2667 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2668 	int *line_idx)
2669 {
2670 	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2671 	int cnt = *line_idx;
2672 	union bnx2x_mcast_config_data cfg_data = {NULL};
2673 
2674 	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2675 				 link) {
2676 
2677 		cfg_data.mac = &pmac_pos->mac[0];
2678 		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2679 
2680 		cnt++;
2681 
2682 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2683 		   pmac_pos->mac);
2684 
2685 		list_del(&pmac_pos->link);
2686 
2687 		/* Break if we reached the maximum number
2688 		 * of rules.
2689 		 */
2690 		if (cnt >= o->max_cmd_len)
2691 			break;
2692 	}
2693 
2694 	*line_idx = cnt;
2695 
2696 	/* if no more MACs to configure - we are done */
2697 	if (list_empty(&cmd_pos->data.macs_head))
2698 		cmd_pos->done = true;
2699 }
2700 
2701 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2702 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2703 	int *line_idx)
2704 {
2705 	int cnt = *line_idx;
2706 
2707 	while (cmd_pos->data.macs_num) {
2708 		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2709 
2710 		cnt++;
2711 
2712 		cmd_pos->data.macs_num--;
2713 
		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);
2716 
2717 		/* Break if we reached the maximum
2718 		 * number of rules.
2719 		 */
2720 		if (cnt >= o->max_cmd_len)
2721 			break;
2722 	}
2723 
2724 	*line_idx = cnt;
2725 
2726 	/* If we cleared all bins - we are done */
2727 	if (!cmd_pos->data.macs_num)
2728 		cmd_pos->done = true;
2729 }
2730 
2731 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2732 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2733 	int *line_idx)
2734 {
2735 	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2736 						line_idx);
2737 
2738 	if (cmd_pos->data.next_bin < 0)
2739 		/* If o->set_restore returned -1 we are done */
2740 		cmd_pos->done = true;
2741 	else
2742 		/* Start from the next bin next time */
2743 		cmd_pos->data.next_bin++;
2744 }
2745 
2746 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2747 				struct bnx2x_mcast_ramrod_params *p)
2748 {
2749 	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2750 	int cnt = 0;
2751 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2752 
2753 	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2754 				 link) {
2755 		switch (cmd_pos->type) {
2756 		case BNX2X_MCAST_CMD_ADD:
2757 			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2758 			break;
2759 
2760 		case BNX2X_MCAST_CMD_DEL:
2761 			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2762 			break;
2763 
2764 		case BNX2X_MCAST_CMD_RESTORE:
2765 			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2766 							   &cnt);
2767 			break;
2768 
2769 		default:
2770 			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2771 			return -EINVAL;
2772 		}
2773 
2774 		/* If the command has been completed - remove it from the list
2775 		 * and free the memory
2776 		 */
2777 		if (cmd_pos->done) {
2778 			list_del(&cmd_pos->link);
2779 			kfree(cmd_pos);
2780 		}
2781 
2782 		/* Break if we reached the maximum number of rules */
2783 		if (cnt >= o->max_cmd_len)
2784 			break;
2785 	}
2786 
2787 	return cnt;
2788 }
2789 
2790 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2791 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2792 	int *line_idx)
2793 {
2794 	struct bnx2x_mcast_list_elem *mlist_pos;
2795 	union bnx2x_mcast_config_data cfg_data = {NULL};
2796 	int cnt = *line_idx;
2797 
2798 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2799 		cfg_data.mac = mlist_pos->mac;
2800 		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2801 
2802 		cnt++;
2803 
2804 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2805 		   mlist_pos->mac);
2806 	}
2807 
2808 	*line_idx = cnt;
2809 }
2810 
2811 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2812 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2813 	int *line_idx)
2814 {
2815 	int cnt = *line_idx, i;
2816 
2817 	for (i = 0; i < p->mcast_list_len; i++) {
2818 		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2819 
2820 		cnt++;
2821 
2822 		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2823 				 p->mcast_list_len - i - 1);
2824 	}
2825 
2826 	*line_idx = cnt;
2827 }
2828 
/**
 * bnx2x_mcast_handle_current_cmd - handle the current (new) command
 *
 * @bp:		device handle
 * @p:		ramrod parameters
 * @cmd:	command to handle (ADD/DEL/RESTORE)
 * @start_cnt:	first line in the ramrod data that may be used
 *
 * This function is called iff there is enough room for the current command in
 * the ramrod data.
 * Returns the total number of lines filled in the ramrod data.
 */
2841 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2842 			struct bnx2x_mcast_ramrod_params *p,
2843 			enum bnx2x_mcast_cmd cmd,
2844 			int start_cnt)
2845 {
2846 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2847 	int cnt = start_cnt;
2848 
2849 	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2850 
2851 	switch (cmd) {
2852 	case BNX2X_MCAST_CMD_ADD:
2853 		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2854 		break;
2855 
2856 	case BNX2X_MCAST_CMD_DEL:
2857 		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2858 		break;
2859 
2860 	case BNX2X_MCAST_CMD_RESTORE:
2861 		o->hdl_restore(bp, o, 0, &cnt);
2862 		break;
2863 
2864 	default:
2865 		BNX2X_ERR("Unknown command: %d\n", cmd);
2866 		return -EINVAL;
2867 	}
2868 
2869 	/* The current command has been handled */
2870 	p->mcast_list_len = 0;
2871 
2872 	return cnt;
2873 }
2874 
2875 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2876 				   struct bnx2x_mcast_ramrod_params *p,
2877 				   enum bnx2x_mcast_cmd cmd)
2878 {
2879 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2880 	int reg_sz = o->get_registry_size(o);
2881 
2882 	switch (cmd) {
2883 	/* DEL command deletes all currently configured MACs */
2884 	case BNX2X_MCAST_CMD_DEL:
2885 		o->set_registry_size(o, 0);
2886 		/* Don't break */
2887 
2888 	/* RESTORE command will restore the entire multicast configuration */
2889 	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be less, as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin; the actual number of bins set in the
		 * registry would then be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
2897 		p->mcast_list_len = reg_sz;
2898 		break;
2899 
2900 	case BNX2X_MCAST_CMD_ADD:
2901 	case BNX2X_MCAST_CMD_CONT:
2902 		/* Here we assume that all new MACs will fall into new bins.
2903 		 * However we will correct the real registry size after we
2904 		 * handle all pending commands.
2905 		 */
2906 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
2907 		break;
2908 
2909 	default:
2910 		BNX2X_ERR("Unknown command: %d\n", cmd);
2911 		return -EINVAL;
2912 
2913 	}
2914 
2915 	/* Increase the total number of MACs pending to be configured */
2916 	o->total_pending_num += p->mcast_list_len;
2917 
2918 	return 0;
2919 }
2920 
2921 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2922 				      struct bnx2x_mcast_ramrod_params *p,
2923 				      int old_num_bins)
2924 {
2925 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2926 
2927 	o->set_registry_size(o, old_num_bins);
2928 	o->total_pending_num -= p->mcast_list_len;
2929 }
2930 
/**
 * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
 *
 * @bp:		device handle
 * @p:		ramrod parameters
 * @len:	number of rules to handle
 */
2938 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2939 					struct bnx2x_mcast_ramrod_params *p,
2940 					u8 len)
2941 {
2942 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2943 	struct eth_multicast_rules_ramrod_data *data =
2944 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2945 
2946 	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2947 					(BNX2X_FILTER_MCAST_PENDING <<
2948 					 BNX2X_SWCID_SHIFT));
2949 	data->header.rule_cnt = len;
2950 }
2951 
/**
 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 *
 * @bp:		device handle
 * @o:		multicast object
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
 *
 * Returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
 */
2963 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2964 						  struct bnx2x_mcast_obj *o)
2965 {
2966 	int i, cnt = 0;
2967 	u64 elem;
2968 
2969 	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2970 		elem = o->registry.aprox_match.vec[i];
2971 		for (; elem; cnt++)
2972 			elem &= elem - 1;
2973 	}
2974 
2975 	o->set_registry_size(o, cnt);
2976 
2977 	return 0;
2978 }
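
/* Kernighan's trick illustrated: "elem &= elem - 1" clears the lowest set
 * bit on each iteration, so e.g. elem == 0xb (binary 1011) is counted in
 * exactly three steps: 1011 -> 1010 -> 1000 -> 0.
 */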
2979 
2980 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2981 				struct bnx2x_mcast_ramrod_params *p,
2982 				enum bnx2x_mcast_cmd cmd)
2983 {
2984 	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2985 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2986 	struct eth_multicast_rules_ramrod_data *data =
2987 		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2988 	int cnt = 0, rc;
2989 
2990 	/* Reset the ramrod data buffer */
2991 	memset(data, 0, sizeof(*data));
2992 
2993 	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2994 
2995 	/* If there are no more pending commands - clear SCHEDULED state */
2996 	if (list_empty(&o->pending_cmds_head))
2997 		o->clear_sched(o);
2998 
2999 	/* The below may be true iff there was enough room in ramrod
3000 	 * data for all pending commands and for the current
3001 	 * command. Otherwise the current command would have been added
3002 	 * to the pending commands and p->mcast_list_len would have been
3003 	 * zeroed.
3004 	 */
3005 	if (p->mcast_list_len > 0)
3006 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3007 
3008 	/* We've pulled out some MACs - update the total number of
3009 	 * outstanding.
3010 	 */
3011 	o->total_pending_num -= cnt;
3012 
3013 	/* send a ramrod */
3014 	WARN_ON(o->total_pending_num < 0);
3015 	WARN_ON(cnt > o->max_cmd_len);
3016 
3017 	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3018 
3019 	/* Update a registry size if there are no more pending operations.
3020 	 *
3021 	 * We don't want to change the value of the registry size if there are
3022 	 * pending operations because we want it to always be equal to the
3023 	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3024 	 * set bins after the last requested operation in order to properly
3025 	 * evaluate the size of the next DEL/RESTORE operation.
3026 	 *
3027 	 * Note that we update the registry itself during command(s) handling
3028 	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3029 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3030 	 * with a limited amount of update commands (per MAC/bin) and we don't
3031 	 * know in this scope what the actual state of bins configuration is
3032 	 * going to be after this ramrod.
3033 	 */
3034 	if (!o->total_pending_num)
3035 		bnx2x_mcast_refresh_registry_e2(bp, o);
3036 
3037 	/*
3038 	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3039 	 * RAMROD_PENDING status immediately.
3040 	 */
3041 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3042 		raw->clear_pending(raw);
3043 		return 0;
3044 	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer, which involves a memory
		 * read; if the memory read were removed we would have to put
		 * a full memory barrier there (inside bnx2x_sp_post()).
		 */
3052 
3053 		/* Send a ramrod */
3054 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3055 				   raw->cid, U64_HI(raw->rdata_mapping),
3056 				   U64_LO(raw->rdata_mapping),
3057 				   ETH_CONNECTION_TYPE);
3058 		if (rc)
3059 			return rc;
3060 
3061 		/* Ramrod completion is pending */
3062 		return 1;
3063 	}
3064 }
3065 
3066 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3067 				    struct bnx2x_mcast_ramrod_params *p,
3068 				    enum bnx2x_mcast_cmd cmd)
3069 {
	/* Mark that there is work to do */
3071 	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3072 		p->mcast_list_len = 1;
3073 
3074 	return 0;
3075 }
3076 
3077 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3078 				       struct bnx2x_mcast_ramrod_params *p,
3079 				       int old_num_bins)
3080 {
3081 	/* Do nothing */
3082 }
3083 
3084 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3085 do { \
3086 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3087 } while (0)
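
/* The macro treats 'filter' as an array of 32-bit words: 'bit >> 5' selects
 * the word and 'bit & 0x1f' the bit within it, so e.g. bin 70 sets bit 6 of
 * filter[2].
 */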
3088 
3089 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3090 					   struct bnx2x_mcast_obj *o,
3091 					   struct bnx2x_mcast_ramrod_params *p,
3092 					   u32 *mc_filter)
3093 {
3094 	struct bnx2x_mcast_list_elem *mlist_pos;
3095 	int bit;
3096 
3097 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3098 		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3099 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3100 
3101 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3102 		   mlist_pos->mac, bit);
3103 
3104 		/* bookkeeping... */
3105 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3106 				  bit);
3107 	}
3108 }
3109 
3110 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3111 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3112 	u32 *mc_filter)
3113 {
3114 	int bit;
3115 
3116 	for (bit = bnx2x_mcast_get_next_bin(o, 0);
3117 	     bit >= 0;
3118 	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3119 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3120 		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3121 	}
3122 }
3123 
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM internal RAM, so we don't
 * really need any tricks to make it work.
 */
3128 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3129 				 struct bnx2x_mcast_ramrod_params *p,
3130 				 enum bnx2x_mcast_cmd cmd)
3131 {
3132 	int i;
3133 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3134 	struct bnx2x_raw_obj *r = &o->raw;
3135 
	/* Unless CLEAR_ONLY has been requested, build the new multicast
	 * filter and write it into the internal memory; on CLEAR_ONLY we
	 * only clear the registry and the pending bit.
	 */
3139 	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3140 		u32 mc_filter[MC_HASH_SIZE] = {0};
3141 
3142 		/* Set the multicast filter bits before writing it into
3143 		 * the internal memory.
3144 		 */
3145 		switch (cmd) {
3146 		case BNX2X_MCAST_CMD_ADD:
3147 			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3148 			break;
3149 
3150 		case BNX2X_MCAST_CMD_DEL:
3151 			DP(BNX2X_MSG_SP,
3152 			   "Invalidating multicast MACs configuration\n");
3153 
3154 			/* clear the registry */
3155 			memset(o->registry.aprox_match.vec, 0,
3156 			       sizeof(o->registry.aprox_match.vec));
3157 			break;
3158 
3159 		case BNX2X_MCAST_CMD_RESTORE:
3160 			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3161 			break;
3162 
3163 		default:
3164 			BNX2X_ERR("Unknown command: %d\n", cmd);
3165 			return -EINVAL;
3166 		}
3167 
3168 		/* Set the mcast filter in the internal memory */
3169 		for (i = 0; i < MC_HASH_SIZE; i++)
3170 			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3171 	} else
3172 		/* clear the registry */
3173 		memset(o->registry.aprox_match.vec, 0,
3174 		       sizeof(o->registry.aprox_match.vec));
3175 
3176 	/* We are done */
3177 	r->clear_pending(r);
3178 
3179 	return 0;
3180 }
3181 
3182 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3183 				   struct bnx2x_mcast_ramrod_params *p,
3184 				   enum bnx2x_mcast_cmd cmd)
3185 {
3186 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3187 	int reg_sz = o->get_registry_size(o);
3188 
3189 	switch (cmd) {
3190 	/* DEL command deletes all currently configured MACs */
3191 	case BNX2X_MCAST_CMD_DEL:
3192 		o->set_registry_size(o, 0);
3193 		/* Don't break */
3194 
3195 	/* RESTORE command will restore the entire multicast configuration */
3196 	case BNX2X_MCAST_CMD_RESTORE:
3197 		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
		   cmd, p->mcast_list_len);
3200 		break;
3201 
3202 	case BNX2X_MCAST_CMD_ADD:
3203 	case BNX2X_MCAST_CMD_CONT:
3204 		/* Multicast MACs on 57710 are configured as unicast MACs and
3205 		 * there is only a limited number of CAM entries for that
3206 		 * matter.
3207 		 */
3208 		if (p->mcast_list_len > o->max_cmd_len) {
3209 			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3210 				  o->max_cmd_len);
3211 			return -EINVAL;
3212 		}
		/* Every configured MAC should be cleared if the DEL command is
		 * called. Only the last ADD command is relevant, as every ADD
		 * command overrides the previous configuration.
		 */
3217 		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3218 		if (p->mcast_list_len > 0)
3219 			o->set_registry_size(o, p->mcast_list_len);
3220 
3221 		break;
3222 
3223 	default:
3224 		BNX2X_ERR("Unknown command: %d\n", cmd);
3225 		return -EINVAL;
3226 
3227 	}
3228 
	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each non-empty command will consume o->max_cmd_len.
	 */
3232 	if (p->mcast_list_len)
3233 		o->total_pending_num += o->max_cmd_len;
3234 
3235 	return 0;
3236 }
3237 
3238 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3239 				      struct bnx2x_mcast_ramrod_params *p,
3240 				      int old_num_macs)
3241 {
3242 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3243 
3244 	o->set_registry_size(o, old_num_macs);
3245 
	/* If the current command hasn't been handled yet, being here
	 * means that it's meant to be dropped and we have to update
	 * the number of outstanding MACs accordingly.
	 */
3250 	if (p->mcast_list_len)
3251 		o->total_pending_num -= o->max_cmd_len;
3252 }
3253 
3254 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3255 					struct bnx2x_mcast_obj *o, int idx,
3256 					union bnx2x_mcast_config_data *cfg_data,
3257 					enum bnx2x_mcast_cmd cmd)
3258 {
3259 	struct bnx2x_raw_obj *r = &o->raw;
3260 	struct mac_configuration_cmd *data =
3261 		(struct mac_configuration_cmd *)(r->rdata);
3262 
3263 	/* copy mac */
3264 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3265 		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3266 				      &data->config_table[idx].middle_mac_addr,
3267 				      &data->config_table[idx].lsb_mac_addr,
3268 				      cfg_data->mac);
3269 
3270 		data->config_table[idx].vlan_id = 0;
3271 		data->config_table[idx].pf_id = r->func_id;
3272 		data->config_table[idx].clients_bit_vector =
3273 			cpu_to_le32(1 << r->cl_id);
3274 
3275 		SET_FLAG(data->config_table[idx].flags,
3276 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3277 			 T_ETH_MAC_COMMAND_SET);
3278 	}
3279 }
3280 
/**
 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
 *
 * @bp:		device handle
 * @p:		ramrod parameters
 * @len:	number of rules to handle
 */
3288 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3289 					struct bnx2x_mcast_ramrod_params *p,
3290 					u8 len)
3291 {
3292 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3293 	struct mac_configuration_cmd *data =
3294 		(struct mac_configuration_cmd *)(r->rdata);
3295 
3296 	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3297 		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3298 		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3299 
3300 	data->hdr.offset = offset;
3301 	data->hdr.client_id = cpu_to_le16(0xff);
3302 	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3303 				     (BNX2X_FILTER_MCAST_PENDING <<
3304 				      BNX2X_SWCID_SHIFT));
3305 	data->hdr.length = len;
3306 }
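
/* Example for the offset computation above: on a non-emulated chip with
 * func_id == 2, and assuming BNX2X_MAX_MULTICAST is 64, the table is
 * placed at offset 64 * (1 + 2) == 192.
 */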
3307 
/**
 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
 *
 * @bp:		device handle
 * @o:		multicast object
 * @start_idx:	index in the registry to start from
 * @rdata_idx:	index in the ramrod data to start from
 *
 * The restore command for 57710 is like all other commands - always a
 * stand-alone command, so start_idx and rdata_idx will always be 0. This
 * function will always succeed.
 * Returns -1 to comply with the 57712 variant.
 */
3321 static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3323 	int *rdata_idx)
3324 {
3325 	struct bnx2x_mcast_mac_elem *elem;
3326 	int i = 0;
3327 	union bnx2x_mcast_config_data cfg_data = {NULL};
3328 
3329 	/* go through the registry and configure the MACs from it. */
3330 	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3331 		cfg_data.mac = &elem->mac[0];
3332 		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3333 
3334 		i++;
3335 
		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
3338 	}
3339 
3340 	*rdata_idx = i;
3341 
3342 	return -1;
3343 }
3344 
3346 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3347 	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3348 {
3349 	struct bnx2x_pending_mcast_cmd *cmd_pos;
3350 	struct bnx2x_mcast_mac_elem *pmac_pos;
3351 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3352 	union bnx2x_mcast_config_data cfg_data = {NULL};
3353 	int cnt = 0;
3354 
3356 	/* If nothing to be done - return */
3357 	if (list_empty(&o->pending_cmds_head))
3358 		return 0;
3359 
3360 	/* Handle the first command */
3361 	cmd_pos = list_first_entry(&o->pending_cmds_head,
3362 				   struct bnx2x_pending_mcast_cmd, link);
3363 
3364 	switch (cmd_pos->type) {
3365 	case BNX2X_MCAST_CMD_ADD:
3366 		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3367 			cfg_data.mac = &pmac_pos->mac[0];
3368 			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3369 
3370 			cnt++;
3371 
3372 			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3373 			   pmac_pos->mac);
3374 		}
3375 		break;
3376 
3377 	case BNX2X_MCAST_CMD_DEL:
3378 		cnt = cmd_pos->data.macs_num;
3379 		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3380 		break;
3381 
3382 	case BNX2X_MCAST_CMD_RESTORE:
3383 		o->hdl_restore(bp, o, 0, &cnt);
3384 		break;
3385 
3386 	default:
3387 		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3388 		return -EINVAL;
3389 	}
3390 
3391 	list_del(&cmd_pos->link);
3392 	kfree(cmd_pos);
3393 
3394 	return cnt;
3395 }
3396 
/**
 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
 *
 * @fw_hi:	most significant 16 bits of the MAC in FW format
 * @fw_mid:	middle 16 bits of the MAC in FW format
 * @fw_lo:	least significant 16 bits of the MAC in FW format
 * @mac:	buffer for the extracted MAC address (ETH_ALEN bytes)
 */
3405 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3406 					 __le16 *fw_lo, u8 *mac)
3407 {
3408 	mac[1] = ((u8 *)fw_hi)[0];
3409 	mac[0] = ((u8 *)fw_hi)[1];
3410 	mac[3] = ((u8 *)fw_mid)[0];
3411 	mac[2] = ((u8 *)fw_mid)[1];
3412 	mac[5] = ((u8 *)fw_lo)[0];
3413 	mac[4] = ((u8 *)fw_lo)[1];
3414 }
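
/* Byte-order sketch: assuming bnx2x_set_fw_mac_addr() packed the MAC
 * 00:11:22:33:44:55 as fw_hi = cpu_to_le16(0x0011), fw_mid =
 * cpu_to_le16(0x2233) and fw_lo = cpu_to_le16(0x4455), the byte swaps
 * above reproduce mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}.
 */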
3415 
/**
 * bnx2x_mcast_refresh_registry_e1 - update the registry from the ramrod data
 *
 * @bp:		device handle
 * @o:		multicast object
 *
 * Check the first entry's flag in the ramrod data to see if it was a DELETE
 * or an ADD command and update the registry correspondingly: if ADD -
 * allocate memory and add the entries to the registry (list), if DELETE -
 * clear the registry and free the memory.
 */
3427 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3428 						  struct bnx2x_mcast_obj *o)
3429 {
3430 	struct bnx2x_raw_obj *raw = &o->raw;
3431 	struct bnx2x_mcast_mac_elem *elem;
3432 	struct mac_configuration_cmd *data =
3433 			(struct mac_configuration_cmd *)(raw->rdata);
3434 
3435 	/* If first entry contains a SET bit - the command was ADD,
3436 	 * otherwise - DEL_ALL
3437 	 */
3438 	if (GET_FLAG(data->config_table[0].flags,
3439 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3440 		int i, len = data->hdr.length;
3441 
		/* Return if it was a RESTORE command */
3443 		if (!list_empty(&o->registry.exact_match.macs))
3444 			return 0;
3445 
3446 		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3447 		if (!elem) {
3448 			BNX2X_ERR("Failed to allocate registry memory\n");
3449 			return -ENOMEM;
3450 		}
3451 
3452 		for (i = 0; i < len; i++, elem++) {
3453 			bnx2x_get_fw_mac_addr(
3454 				&data->config_table[i].msb_mac_addr,
3455 				&data->config_table[i].middle_mac_addr,
3456 				&data->config_table[i].lsb_mac_addr,
3457 				elem->mac);
3458 			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3459 			   elem->mac);
3460 			list_add_tail(&elem->link,
3461 				      &o->registry.exact_match.macs);
3462 		}
3463 	} else {
3464 		elem = list_first_entry(&o->registry.exact_match.macs,
3465 					struct bnx2x_mcast_mac_elem, link);
3466 		DP(BNX2X_MSG_SP, "Deleting a registry\n");
3467 		kfree(elem);
3468 		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3469 	}
3470 
3471 	return 0;
3472 }
3473 
3474 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3475 				struct bnx2x_mcast_ramrod_params *p,
3476 				enum bnx2x_mcast_cmd cmd)
3477 {
3478 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3479 	struct bnx2x_raw_obj *raw = &o->raw;
3480 	struct mac_configuration_cmd *data =
3481 		(struct mac_configuration_cmd *)(raw->rdata);
3482 	int cnt = 0, i, rc;
3483 
3484 	/* Reset the ramrod data buffer */
3485 	memset(data, 0, sizeof(*data));
3486 
3487 	/* First set all entries as invalid */
3488 	for (i = 0; i < o->max_cmd_len ; i++)
3489 		SET_FLAG(data->config_table[i].flags,
3490 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3491 			 T_ETH_MAC_COMMAND_INVALIDATE);
3492 
3493 	/* Handle pending commands first */
3494 	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3495 
3496 	/* If there are no more pending commands - clear SCHEDULED state */
3497 	if (list_empty(&o->pending_cmds_head))
3498 		o->clear_sched(o);
3499 
3500 	/* The below may be true iff there were no pending commands */
3501 	if (!cnt)
3502 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3503 
3504 	/* For 57710 every command has o->max_cmd_len length to ensure that
3505 	 * commands are done one at a time.
3506 	 */
3507 	o->total_pending_num -= o->max_cmd_len;
3508 
3509 	/* send a ramrod */
3510 
3511 	WARN_ON(cnt > o->max_cmd_len);
3512 
3513 	/* Set ramrod header (in particular, a number of entries to update) */
3514 	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3515 
	/* update the registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we send one command at a time,
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
3522 	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3523 	if (rc)
3524 		return rc;
3525 
3526 	/*
3527 	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3528 	 * RAMROD_PENDING status immediately.
3529 	 */
3530 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3531 		raw->clear_pending(raw);
3532 		return 0;
3533 	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer, which involves a memory
		 * read; if the memory read were removed we would have to put
		 * a full memory barrier there (inside bnx2x_sp_post()).
		 */
3541 
3542 		/* Send a ramrod */
3543 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3544 				   U64_HI(raw->rdata_mapping),
3545 				   U64_LO(raw->rdata_mapping),
3546 				   ETH_CONNECTION_TYPE);
3547 		if (rc)
3548 			return rc;
3549 
3550 		/* Ramrod completion is pending */
3551 		return 1;
3552 	}
3554 }
3555 
3556 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3557 {
3558 	return o->registry.exact_match.num_macs_set;
3559 }
3560 
3561 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3562 {
3563 	return o->registry.aprox_match.num_bins_set;
3564 }
3565 
3566 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3567 						int n)
3568 {
3569 	o->registry.exact_match.num_macs_set = n;
3570 }
3571 
3572 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3573 						int n)
3574 {
3575 	o->registry.aprox_match.num_bins_set = n;
3576 }
3577 
3578 int bnx2x_config_mcast(struct bnx2x *bp,
3579 		       struct bnx2x_mcast_ramrod_params *p,
3580 		       enum bnx2x_mcast_cmd cmd)
3581 {
3582 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3583 	struct bnx2x_raw_obj *r = &o->raw;
3584 	int rc = 0, old_reg_size;
3585 
3586 	/* This is needed to recover number of currently configured mcast macs
3587 	 * in case of failure.
3588 	 */
3589 	old_reg_size = o->get_registry_size(o);
3590 
3591 	/* Do some calculations and checks */
3592 	rc = o->validate(bp, p, cmd);
3593 	if (rc)
3594 		return rc;
3595 
3596 	/* Return if there is no work to do */
3597 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3598 		return 0;
3599 
3600 	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3601 	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3602 
3603 	/* Enqueue the current command to the pending list if we can't complete
3604 	 * it in the current iteration
3605 	 */
3606 	if (r->check_pending(r) ||
3607 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3608 		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3609 		if (rc < 0)
3610 			goto error_exit1;
3611 
3612 		/* As long as the current command is in a command list we
3613 		 * don't need to handle it separately.
3614 		 */
3615 		p->mcast_list_len = 0;
3616 	}
3617 
3618 	if (!r->check_pending(r)) {
3619 
3620 		/* Set 'pending' state */
3621 		r->set_pending(r);
3622 
3623 		/* Configure the new classification in the chip */
3624 		rc = o->config_mcast(bp, p, cmd);
3625 		if (rc < 0)
3626 			goto error_exit2;
3627 
3628 		/* Wait for a ramrod completion if was requested */
3629 		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3630 			rc = o->wait_comp(bp, o);
3631 	}
3632 
3633 	return rc;
3634 
3635 error_exit2:
3636 	r->clear_pending(r);
3637 
3638 error_exit1:
3639 	o->revert(bp, p, old_reg_size);
3640 
3641 	return rc;
3642 }
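
/* Illustrative usage sketch (not part of the driver): a caller fills the
 * ramrod parameters, optionally builds rparam.mcast_list out of
 * bnx2x_mcast_list_elem entries, and lets the state machine above do the
 * rest; error handling is omitted.
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {NULL};
 *	int rc;
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 *	(fill rparam.mcast_list and rparam.mcast_list_len)
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
 */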
3643 
3644 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3645 {
3646 	smp_mb__before_clear_bit();
3647 	clear_bit(o->sched_state, o->raw.pstate);
3648 	smp_mb__after_clear_bit();
3649 }
3650 
3651 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3652 {
3653 	smp_mb__before_clear_bit();
3654 	set_bit(o->sched_state, o->raw.pstate);
3655 	smp_mb__after_clear_bit();
3656 }
3657 
3658 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3659 {
3660 	return !!test_bit(o->sched_state, o->raw.pstate);
3661 }
3662 
3663 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3664 {
3665 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3666 }
3667 
3668 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3669 			  struct bnx2x_mcast_obj *mcast_obj,
3670 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3671 			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3672 			  int state, unsigned long *pstate, bnx2x_obj_type type)
3673 {
3674 	memset(mcast_obj, 0, sizeof(*mcast_obj));
3675 
3676 	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3677 			   rdata, rdata_mapping, state, pstate, type);
3678 
3679 	mcast_obj->engine_id = engine_id;
3680 
3681 	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3682 
3683 	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3684 	mcast_obj->check_sched = bnx2x_mcast_check_sched;
3685 	mcast_obj->set_sched = bnx2x_mcast_set_sched;
3686 	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3687 
3688 	if (CHIP_IS_E1(bp)) {
3689 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3690 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3691 		mcast_obj->hdl_restore       =
3692 			bnx2x_mcast_handle_restore_cmd_e1;
3693 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3694 
3695 		if (CHIP_REV_IS_SLOW(bp))
3696 			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3697 		else
3698 			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3699 
3700 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3701 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3702 		mcast_obj->validate          = bnx2x_mcast_validate_e1;
3703 		mcast_obj->revert            = bnx2x_mcast_revert_e1;
3704 		mcast_obj->get_registry_size =
3705 			bnx2x_mcast_get_registry_size_exact;
3706 		mcast_obj->set_registry_size =
3707 			bnx2x_mcast_set_registry_size_exact;
3708 
3709 		/* 57710 is the only chip that uses the exact match for mcast
3710 		 * at the moment.
3711 		 */
3712 		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3713 
3714 	} else if (CHIP_IS_E1H(bp)) {
3715 		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3716 		mcast_obj->enqueue_cmd   = NULL;
3717 		mcast_obj->hdl_restore   = NULL;
3718 		mcast_obj->check_pending = bnx2x_mcast_check_pending;
3719 
3720 		/* 57711 doesn't send a ramrod, so it has unlimited credit
3721 		 * for one command.
3722 		 */
3723 		mcast_obj->max_cmd_len       = -1;
3724 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3725 		mcast_obj->set_one_rule      = NULL;
3726 		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3727 		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3728 		mcast_obj->get_registry_size =
3729 			bnx2x_mcast_get_registry_size_aprox;
3730 		mcast_obj->set_registry_size =
3731 			bnx2x_mcast_set_registry_size_aprox;
3732 	} else {
3733 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3734 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3735 		mcast_obj->hdl_restore       =
3736 			bnx2x_mcast_handle_restore_cmd_e2;
3737 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3738 		/* TODO: There should be a proper HSI define for this number!!!
3739 		 */
3740 		mcast_obj->max_cmd_len       = 16;
3741 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3742 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3743 		mcast_obj->validate          = bnx2x_mcast_validate_e2;
3744 		mcast_obj->revert            = bnx2x_mcast_revert_e2;
3745 		mcast_obj->get_registry_size =
3746 			bnx2x_mcast_get_registry_size_aprox;
3747 		mcast_obj->set_registry_size =
3748 			bnx2x_mcast_set_registry_size_aprox;
3749 	}
3750 }
3751 
3752 /*************************** Credit handling **********************************/
3753 
/**
 * __atomic_add_ifless - add if the result is less than a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to add to v...
 * @u:	...if (v + a) is less than u.
 *
 * Returns true if (v + a) was less than u, and false otherwise.
 */
3764 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3765 {
3766 	int c, old;
3767 
3768 	c = atomic_read(v);
3769 	for (;;) {
3770 		if (unlikely(c + a >= u))
3771 			return false;
3772 
3773 		old = atomic_cmpxchg((v), c, c + a);
3774 		if (likely(old == c))
3775 			break;
3776 		c = old;
3777 	}
3778 
3779 	return true;
3780 }
3781 
/**
 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to
 * a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to subtract from v...
 * @u:	...if (v - a) is greater than or equal to u.
 *
 * Returns true if (v - a) was greater than or equal to u, and false
 * otherwise.
 */
3792 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3793 {
3794 	int c, old;
3795 
3796 	c = atomic_read(v);
3797 	for (;;) {
3798 		if (unlikely(c - a < u))
3799 			return false;
3800 
3801 		old = atomic_cmpxchg((v), c, c - a);
3802 		if (likely(old == c))
3803 			break;
3804 		c = old;
3805 	}
3806 
3807 	return true;
3808 }
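
/* Worked example for the two helpers above (illustrative only): starting
 * from atomic_set(&v, 5),
 *
 *	__atomic_dec_ifmoe(&v, 3, 0)  -> 5 - 3 = 2 >= 0: v = 2, returns true
 *	__atomic_dec_ifmoe(&v, 3, 0)  -> 2 - 3 < 0: v unchanged, returns false
 *	__atomic_add_ifless(&v, 4, 6) -> 2 + 4 = 6 is not < 6: v unchanged,
 *					 returns false
 *
 * This is exactly the pattern the credit pool below relies on: a "get" may
 * never drive the credit below zero and a "put" may never grow it past
 * pool_sz.
 */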
3809 
3810 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3811 {
3812 	bool rc;
3813 
3814 	smp_mb();
3815 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3816 	smp_mb();
3817 
3818 	return rc;
3819 }
3820 
3821 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3822 {
3823 	bool rc;
3824 
3825 	smp_mb();
3826 
	/* Don't allow a refill if credit + cnt > pool_sz */
3828 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3829 
3830 	smp_mb();
3831 
3832 	return rc;
3833 }
3834 
3835 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3836 {
3837 	int cur_credit;
3838 
3839 	smp_mb();
3840 	cur_credit = atomic_read(&o->credit);
3841 
3842 	return cur_credit;
3843 }
3844 
3845 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3846 					  int cnt)
3847 {
3848 	return true;
3849 }
3852 static bool bnx2x_credit_pool_get_entry(
3853 	struct bnx2x_credit_pool_obj *o,
3854 	int *offset)
3855 {
3856 	int idx, vec, i;
3857 
3858 	*offset = -1;
3859 
3860 	/* Find "internal cam-offset" then add to base for this object... */
3861 	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3862 
3863 		/* Skip the current vector if there are no free entries in it */
3864 		if (!o->pool_mirror[vec])
3865 			continue;
3866 
		/* If we got here, this vector has at least one free entry */
3868 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3869 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3870 
3871 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3872 				/* Got one!! */
3873 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3874 				*offset = o->base_pool_offset + idx;
3875 				return true;
3876 			}
3877 	}
3878 
3879 	return false;
3880 }
3881 
3882 static bool bnx2x_credit_pool_put_entry(
3883 	struct bnx2x_credit_pool_obj *o,
3884 	int offset)
3885 {
3886 	if (offset < o->base_pool_offset)
3887 		return false;
3888 
3889 	offset -= o->base_pool_offset;
3890 
3891 	if (offset >= o->pool_sz)
3892 		return false;
3893 
3894 	/* Return the entry to the pool */
3895 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3896 
3897 	return true;
3898 }
3899 
3900 static bool bnx2x_credit_pool_put_entry_always_true(
3901 	struct bnx2x_credit_pool_obj *o,
3902 	int offset)
3903 {
3904 	return true;
3905 }
3906 
3907 static bool bnx2x_credit_pool_get_entry_always_true(
3908 	struct bnx2x_credit_pool_obj *o,
3909 	int *offset)
3910 {
3911 	*offset = -1;
3912 	return true;
3913 }

/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:		credit pool object
 * @base:	Base entry in the CAM to use.
 * @credit:	pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 */
3925 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3926 					  int base, int credit)
3927 {
3928 	/* Zero the object first */
3929 	memset(p, 0, sizeof(*p));
3930 
3931 	/* Set the table to all 1s */
3932 	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3933 
3934 	/* Init a pool as full */
3935 	atomic_set(&p->credit, credit);
3936 
	/* The total pool size */
3938 	p->pool_sz = credit;
3939 
3940 	p->base_pool_offset = base;
3941 
3942 	/* Commit the change */
3943 	smp_mb();
3944 
3945 	p->check = bnx2x_credit_pool_check;
3946 
3947 	/* if pool credit is negative - disable the checks */
3948 	if (credit >= 0) {
3949 		p->put      = bnx2x_credit_pool_put;
3950 		p->get      = bnx2x_credit_pool_get;
3951 		p->put_entry = bnx2x_credit_pool_put_entry;
3952 		p->get_entry = bnx2x_credit_pool_get_entry;
3953 	} else {
3954 		p->put      = bnx2x_credit_pool_always_true;
3955 		p->get      = bnx2x_credit_pool_always_true;
3956 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3957 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3958 	}
3959 
3960 	/* If base is negative - disable entries handling */
3961 	if (base < 0) {
3962 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3963 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3964 	}
3965 }
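
/* Illustrative only - the special argument values described above:
 *
 *	bnx2x_init_credit_pool(p, -1, 64);   64 credits, no CAM-offset handling
 *	bnx2x_init_credit_pool(p, 0, -1);    unlimited pool, get/put always
 *					     succeed
 *	bnx2x_init_credit_pool(p, 0, 0);     empty pool - blocks all operations
 */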
3966 
3967 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3968 				struct bnx2x_credit_pool_obj *p, u8 func_id,
3969 				u8 func_num)
3970 {
3971 /* TODO: this will be defined in consts as well... */
3972 #define BNX2X_CAM_SIZE_EMUL 5
3973 
3974 	int cam_sz;
3975 
3976 	if (CHIP_IS_E1(bp)) {
		/* In E1 multicast MACs occupy CAM entries as well,
		 * so make room for them
		 */
3978 		if (!CHIP_REV_IS_SLOW(bp))
3979 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3980 		else
3981 			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3982 
3983 		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3984 
3985 	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
		 */
		if (func_num > 0) {
3990 			if (!CHIP_REV_IS_SLOW(bp))
3991 				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3992 			else
3993 				cam_sz = BNX2X_CAM_SIZE_EMUL;
3994 			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3995 		} else {
3996 			/* this should never happen! Block MAC operations. */
3997 			bnx2x_init_credit_pool(p, 0, 0);
3998 		}
3999 
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
4007 			if (!CHIP_REV_IS_SLOW(bp))
4008 				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4009 			else
4010 				cam_sz = BNX2X_CAM_SIZE_EMUL;
4011 
4012 			/*
4013 			 * No need for CAM entries handling for 57712 and
4014 			 * newer.
4015 			 */
4016 			bnx2x_init_credit_pool(p, -1, cam_sz);
4017 		} else {
4018 			/* this should never happen! Block MAC operations. */
4019 			bnx2x_init_credit_pool(p, 0, 0);
		}
	}
4023 }
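
/* Example of the split above (illustrative): on a non-emulation E1H part
 * with func_num = 4, each function gets
 * cam_sz = MAX_MAC_CREDIT_E1H / (2 * 4) CAM entries, and function N owns
 * the offset range [N * cam_sz, (N + 1) * cam_sz). On 57712 and newer the
 * base is -1, so only the credit counter is enforced and no per-entry CAM
 * offsets are tracked.
 */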
4024 
4025 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4026 				 struct bnx2x_credit_pool_obj *p,
4027 				 u8 func_id,
4028 				 u8 func_num)
4029 {
4030 	if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711;
		 * only MAC / MAC-VLAN can be set
		 */
4035 		bnx2x_init_credit_pool(p, 0, -1);
4036 	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
4041 		if (func_num > 0) {
4042 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4043 			bnx2x_init_credit_pool(p, func_id * credit, credit);
4044 		} else
4045 			/* this should never happen! Block VLAN operations. */
4046 			bnx2x_init_credit_pool(p, 0, 0);
4047 	}
4048 }
4049 
4050 /****************** RSS Configuration ******************/
4051 /**
4052  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4053  *
 * @bp:		driver handle
4055  * @p:		pointer to rss configuration
4056  *
4057  * Prints it when NETIF_MSG_IFUP debug level is configured.
4058  */
4059 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4060 					struct bnx2x_config_rss_params *p)
4061 {
4062 	int i;
4063 
4064 	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4065 	DP(BNX2X_MSG_SP, "0x0000: ");
4066 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4067 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4068 
4069 		/* Print 4 bytes in a line */
4070 		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4071 		    (((i + 1) & 0x3) == 0)) {
4072 			DP_CONT(BNX2X_MSG_SP, "\n");
4073 			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4074 		}
4075 	}
4076 
4077 	DP_CONT(BNX2X_MSG_SP, "\n");
4078 }
4079 
4080 /**
4081  * bnx2x_setup_rss - configure RSS
4082  *
4083  * @bp:		device handle
4084  * @p:		rss configuration
4085  *
 * Sends an RSS_UPDATE ramrod to configure it.
4087  */
4088 static int bnx2x_setup_rss(struct bnx2x *bp,
4089 			   struct bnx2x_config_rss_params *p)
4090 {
4091 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4092 	struct bnx2x_raw_obj *r = &o->raw;
4093 	struct eth_rss_update_ramrod_data *data =
4094 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4095 	u8 rss_mode = 0;
4096 	int rc;
4097 
4098 	memset(data, 0, sizeof(*data));
4099 
4100 	DP(BNX2X_MSG_SP, "Configuring RSS\n");
4101 
4102 	/* Set an echo field */
4103 	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4104 				 (r->state << BNX2X_SWCID_SHIFT));
4105 
4106 	/* RSS mode */
4107 	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4108 		rss_mode = ETH_RSS_MODE_DISABLED;
4109 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4110 		rss_mode = ETH_RSS_MODE_REGULAR;
4111 
4112 	data->rss_mode = rss_mode;
4113 
4114 	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4115 
4116 	/* RSS capabilities */
4117 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4118 		data->capabilities |=
4119 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4120 
4121 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4122 		data->capabilities |=
4123 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4124 
4125 	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4126 		data->capabilities |=
4127 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4128 
4129 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4130 		data->capabilities |=
4131 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4132 
4133 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4134 		data->capabilities |=
4135 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4136 
4137 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4138 		data->capabilities |=
4139 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4140 
4141 	/* Hashing mask */
4142 	data->rss_result_mask = p->rss_result_mask;
4143 
4144 	/* RSS engine ID */
4145 	data->rss_engine_id = o->engine_id;
4146 
4147 	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4148 
4149 	/* Indirection table */
4150 	memcpy(data->indirection_table, p->ind_table,
4151 		  T_ETH_INDIRECTION_TABLE_SIZE);
4152 
4153 	/* Remember the last configuration */
4154 	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4155 
4156 	/* Print the indirection table */
4157 	if (netif_msg_ifup(bp))
4158 		bnx2x_debug_print_ind_table(bp, p);
4159 
4160 	/* RSS keys */
4161 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4162 		memcpy(&data->rss_key[0], &p->rss_key[0],
4163 		       sizeof(data->rss_key));
4164 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4165 	}
4166 
	/* No need for an explicit memory barrier here: bnx2x_sp_post()
	 * contains a full memory barrier that orders the write to the
	 * SPQ element against the update of the SPQ producer.
	 */
4174 
4175 	/* Send a ramrod */
4176 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4177 			   U64_HI(r->rdata_mapping),
4178 			   U64_LO(r->rdata_mapping),
4179 			   ETH_CONNECTION_TYPE);
4180 
4181 	if (rc < 0)
4182 		return rc;
4183 
4184 	return 1;
4185 }
4186 
4187 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4188 			     u8 *ind_table)
4189 {
4190 	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4191 }
4192 
4193 int bnx2x_config_rss(struct bnx2x *bp,
4194 		     struct bnx2x_config_rss_params *p)
4195 {
4196 	int rc;
4197 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4198 	struct bnx2x_raw_obj *r = &o->raw;
4199 
4200 	/* Do nothing if only driver cleanup was requested */
4201 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4202 		return 0;
4203 
4204 	r->set_pending(r);
4205 
4206 	rc = o->config_rss(bp, p);
4207 	if (rc < 0) {
4208 		r->clear_pending(r);
4209 		return rc;
4210 	}
4211 
4212 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4213 		rc = r->wait_comp(bp, r);
4214 
4215 	return rc;
4216 }
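
/* Illustrative usage sketch (not part of the driver): configuring regular
 * RSS over IPv4/TCP with completion waiting; error handling is omitted.
 *
 *	struct bnx2x_config_rss_params params = {NULL};
 *	int rc;
 *
 *	params.rss_obj = &bp->rss_conf_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 *	params.rss_result_mask = MULTI_MASK;
 *	memcpy(params.ind_table, bp->rss_conf_obj.ind_table,
 *	       sizeof(params.ind_table));
 *	(optionally set params.rss_key and BNX2X_RSS_SET_SRCH)
 *	rc = bnx2x_config_rss(bp, &params);
 */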
4219 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4220 			       struct bnx2x_rss_config_obj *rss_obj,
4221 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4222 			       void *rdata, dma_addr_t rdata_mapping,
4223 			       int state, unsigned long *pstate,
4224 			       bnx2x_obj_type type)
4225 {
4226 	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4227 			   rdata_mapping, state, pstate, type);
4228 
4229 	rss_obj->engine_id  = engine_id;
4230 	rss_obj->config_rss = bnx2x_setup_rss;
4231 }
4232 
4233 /********************** Queue state object ***********************************/
4234 
4235 /**
4236  * bnx2x_queue_state_change - perform Queue state change transition
4237  *
4238  * @bp:		device handle
4239  * @params:	parameters to perform the transition
4240  *
 * Returns 0 in case of a successfully completed transition, a negative
 * error code in case of failure, or a positive (EBUSY) value if there is
 * a completion that is still pending (possible only if RAMROD_COMP_WAIT
 * is not set in params->ramrod_flags for asynchronous commands).
4246  */
4247 int bnx2x_queue_state_change(struct bnx2x *bp,
4248 			     struct bnx2x_queue_state_params *params)
4249 {
4250 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4251 	int rc, pending_bit;
4252 	unsigned long *pending = &o->pending;
4253 
4254 	/* Check that the requested transition is legal */
4255 	rc = o->check_transition(bp, o, params);
4256 	if (rc) {
4257 		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4258 		return -EINVAL;
4259 	}
4260 
4261 	/* Set "pending" bit */
4262 	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4263 	pending_bit = o->set_pending(o, params);
4264 	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4265 
4266 	/* Don't send a command if only driver cleanup was requested */
4267 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4268 		o->complete_cmd(bp, o, pending_bit);
4269 	else {
4270 		/* Send a ramrod */
4271 		rc = o->send_cmd(bp, params);
4272 		if (rc) {
4273 			o->next_state = BNX2X_Q_STATE_MAX;
4274 			clear_bit(pending_bit, pending);
4275 			smp_mb__after_clear_bit();
4276 			return rc;
4277 		}
4278 
4279 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4280 			rc = o->wait_comp(bp, o, pending_bit);
4281 			if (rc)
4282 				return rc;
4283 
4284 			return 0;
4285 		}
4286 	}
4287 
4288 	return !!test_bit(pending_bit, pending);
4289 }
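
/* Illustrative usage sketch (not part of the driver): bringing a queue from
 * RESET to ACTIVE is a two-step INIT + SETUP sequence; error handling is
 * omitted.
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *	int rc;
 *
 *	q_params.q_obj = q_obj;		(a previously initialized object)
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	q_params.cmd = BNX2X_Q_CMD_INIT;
 *	(fill q_params.params.init)
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 *	q_params.cmd = BNX2X_Q_CMD_SETUP;
 *	(fill q_params.params.setup)
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */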
4292 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4293 				   struct bnx2x_queue_state_params *params)
4294 {
4295 	enum bnx2x_queue_cmd cmd = params->cmd, bit;
4296 
4297 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4298 	 * UPDATE command.
4299 	 */
4300 	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4301 	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
4302 		bit = BNX2X_Q_CMD_UPDATE;
4303 	else
4304 		bit = cmd;
4305 
4306 	set_bit(bit, &obj->pending);
4307 	return bit;
4308 }
4309 
4310 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4311 				 struct bnx2x_queue_sp_obj *o,
4312 				 enum bnx2x_queue_cmd cmd)
4313 {
4314 	return bnx2x_state_wait(bp, cmd, &o->pending);
4315 }
4316 
/**
 * bnx2x_queue_comp_cmd - complete the state change command.
 *
 * @bp:		device handle
 * @o:		queue state object
 * @cmd:	command that has completed
 *
 * Checks that the arrived completion is expected.
 */
4326 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4327 				struct bnx2x_queue_sp_obj *o,
4328 				enum bnx2x_queue_cmd cmd)
4329 {
4330 	unsigned long cur_pending = o->pending;
4331 
4332 	if (!test_and_clear_bit(cmd, &cur_pending)) {
4333 		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4334 			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4335 			  o->state, cur_pending, o->next_state);
4336 		return -EINVAL;
4337 	}
4338 
4339 	if (o->next_tx_only >= o->max_cos)
		/* >= because the number of tx-only connections must always
		 * be smaller than max_cos, since the primary connection
		 * supports COS 0
		 */
4343 		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4344 			   o->next_tx_only, o->max_cos);
4345 
4346 	DP(BNX2X_MSG_SP,
4347 	   "Completing command %d for queue %d, setting state to %d\n",
4348 	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4349 
4350 	if (o->next_tx_only)  /* print num tx-only if any exist */
4351 		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4352 		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4353 
4354 	o->state = o->next_state;
4355 	o->num_tx_only = o->next_tx_only;
4356 	o->next_state = BNX2X_Q_STATE_MAX;
4357 
4358 	/* It's important that o->state and o->next_state are
4359 	 * updated before o->pending.
4360 	 */
4361 	wmb();
4362 
4363 	clear_bit(cmd, &o->pending);
4364 	smp_mb__after_clear_bit();
4365 
4366 	return 0;
4367 }
4368 
4369 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4370 				struct bnx2x_queue_state_params *cmd_params,
4371 				struct client_init_ramrod_data *data)
4372 {
4373 	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4374 
4375 	/* Rx data */
4376 
4377 	/* IPv6 TPA supported for E2 and above only */
4378 	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4379 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4380 }
4381 
4382 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4383 				struct bnx2x_queue_sp_obj *o,
4384 				struct bnx2x_general_setup_params *params,
4385 				struct client_init_general_data *gen_data,
4386 				unsigned long *flags)
4387 {
4388 	gen_data->client_id = o->cl_id;
4389 
4390 	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4391 		gen_data->statistics_counter_id =
4392 					params->stat_id;
4393 		gen_data->statistics_en_flg = 1;
4394 		gen_data->statistics_zero_flg =
4395 			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4396 	} else
4397 		gen_data->statistics_counter_id =
4398 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4399 
4400 	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4401 	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4402 	gen_data->sp_client_id = params->spcl_id;
4403 	gen_data->mtu = cpu_to_le16(params->mtu);
4404 	gen_data->func_id = o->func_id;
4407 	gen_data->cos = params->cos;
4408 
4409 	gen_data->traffic_type =
4410 		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4411 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4412 
4413 	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4414 	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4415 }
4416 
4417 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4418 				struct bnx2x_txq_setup_params *params,
4419 				struct client_init_tx_data *tx_data,
4420 				unsigned long *flags)
4421 {
4422 	tx_data->enforce_security_flg =
4423 		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4424 	tx_data->default_vlan =
4425 		cpu_to_le16(params->default_vlan);
4426 	tx_data->default_vlan_flg =
4427 		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4428 	tx_data->tx_switching_flg =
4429 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4430 	tx_data->anti_spoofing_flg =
4431 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4432 	tx_data->force_default_pri_flg =
4433 		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4434 
4435 	tx_data->tunnel_lso_inc_ip_id =
4436 		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4437 	tx_data->tunnel_non_lso_pcsum_location =
4438 		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4439 								  PCSUM_ON_BD;
4440 
4441 	tx_data->tx_status_block_id = params->fw_sb_id;
4442 	tx_data->tx_sb_index_number = params->sb_cq_index;
4443 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4444 
4445 	tx_data->tx_bd_page_base.lo =
4446 		cpu_to_le32(U64_LO(params->dscr_map));
4447 	tx_data->tx_bd_page_base.hi =
4448 		cpu_to_le32(U64_HI(params->dscr_map));
4449 
4450 	/* Don't configure any Tx switching mode during queue SETUP */
4451 	tx_data->state = 0;
4452 }
4453 
4454 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4455 				struct rxq_pause_params *params,
4456 				struct client_init_rx_data *rx_data)
4457 {
4458 	/* flow control data */
4459 	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4460 	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4461 	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4462 	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4463 	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4464 	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4465 	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4466 }
4467 
4468 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4469 				struct bnx2x_rxq_setup_params *params,
4470 				struct client_init_rx_data *rx_data,
4471 				unsigned long *flags)
4472 {
4473 	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4474 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4475 	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4476 				CLIENT_INIT_RX_DATA_TPA_MODE;
4477 	rx_data->vmqueue_mode_en_flg = 0;
4478 
4479 	rx_data->cache_line_alignment_log_size =
4480 		params->cache_line_log;
4481 	rx_data->enable_dynamic_hc =
4482 		test_bit(BNX2X_Q_FLG_DHC, flags);
4483 	rx_data->max_sges_for_packet = params->max_sges_pkt;
4484 	rx_data->client_qzone_id = params->cl_qzone_id;
4485 	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4486 
4487 	/* Always start in DROP_ALL mode */
4488 	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4489 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4490 
4491 	/* We don't set drop flags */
4492 	rx_data->drop_ip_cs_err_flg = 0;
4493 	rx_data->drop_tcp_cs_err_flg = 0;
4494 	rx_data->drop_ttl0_flg = 0;
4495 	rx_data->drop_udp_cs_err_flg = 0;
4496 	rx_data->inner_vlan_removal_enable_flg =
4497 		test_bit(BNX2X_Q_FLG_VLAN, flags);
4498 	rx_data->outer_vlan_removal_enable_flg =
4499 		test_bit(BNX2X_Q_FLG_OV, flags);
4500 	rx_data->status_block_id = params->fw_sb_id;
4501 	rx_data->rx_sb_index_number = params->sb_cq_index;
4502 	rx_data->max_tpa_queues = params->max_tpa_queues;
4503 	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4504 	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4505 	rx_data->bd_page_base.lo =
4506 		cpu_to_le32(U64_LO(params->dscr_map));
4507 	rx_data->bd_page_base.hi =
4508 		cpu_to_le32(U64_HI(params->dscr_map));
4509 	rx_data->sge_page_base.lo =
4510 		cpu_to_le32(U64_LO(params->sge_map));
4511 	rx_data->sge_page_base.hi =
4512 		cpu_to_le32(U64_HI(params->sge_map));
4513 	rx_data->cqe_page_base.lo =
4514 		cpu_to_le32(U64_LO(params->rcq_map));
4515 	rx_data->cqe_page_base.hi =
4516 		cpu_to_le32(U64_HI(params->rcq_map));
4517 	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4518 
4519 	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4520 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4521 		rx_data->is_approx_mcast = 1;
4522 	}
4523 
4524 	rx_data->rss_engine_id = params->rss_engine_id;
4525 
4526 	/* silent vlan removal */
4527 	rx_data->silent_vlan_removal_flg =
4528 		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4529 	rx_data->silent_vlan_value =
4530 		cpu_to_le16(params->silent_removal_value);
4531 	rx_data->silent_vlan_mask =
4532 		cpu_to_le16(params->silent_removal_mask);
}
4535 
4536 /* initialize the general, tx and rx parts of a queue object */
4537 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4538 				struct bnx2x_queue_state_params *cmd_params,
4539 				struct client_init_ramrod_data *data)
4540 {
4541 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4542 				       &cmd_params->params.setup.gen_params,
4543 				       &data->general,
4544 				       &cmd_params->params.setup.flags);
4545 
4546 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4547 				  &cmd_params->params.setup.txq_params,
4548 				  &data->tx,
4549 				  &cmd_params->params.setup.flags);
4550 
4551 	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4552 				  &cmd_params->params.setup.rxq_params,
4553 				  &data->rx,
4554 				  &cmd_params->params.setup.flags);
4555 
4556 	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4557 				     &cmd_params->params.setup.pause_params,
4558 				     &data->rx);
4559 }
4560 
4561 /* initialize the general and tx parts of a tx-only queue object */
4562 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4563 				struct bnx2x_queue_state_params *cmd_params,
4564 				struct tx_queue_init_ramrod_data *data)
4565 {
4566 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4567 				       &cmd_params->params.tx_only.gen_params,
4568 				       &data->general,
4569 				       &cmd_params->params.tx_only.flags);
4570 
4571 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4572 				  &cmd_params->params.tx_only.txq_params,
4573 				  &data->tx,
4574 				  &cmd_params->params.tx_only.flags);
4575 
4576 	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4577 			 cmd_params->q_obj->cids[0],
4578 			 data->tx.tx_bd_page_base.lo,
4579 			 data->tx.tx_bd_page_base.hi);
4580 }
4581 
4582 /**
4583  * bnx2x_q_init - init HW/FW queue
4584  *
4585  * @bp:		device handle
 * @params:	queue state parameters
4587  *
4588  * HW/FW initial Queue configuration:
4589  *      - HC: Rx and Tx
4590  *      - CDU context validation
4591  *
4592  */
4593 static inline int bnx2x_q_init(struct bnx2x *bp,
4594 			       struct bnx2x_queue_state_params *params)
4595 {
4596 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4597 	struct bnx2x_queue_init_params *init = &params->params.init;
4598 	u16 hc_usec;
4599 	u8 cos;
4600 
4601 	/* Tx HC configuration */
4602 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4603 	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4604 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4605 
4606 		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4607 			init->tx.sb_cq_index,
4608 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4609 			hc_usec);
4610 	}
4611 
4612 	/* Rx HC configuration */
4613 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4614 	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4615 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4616 
4617 		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4618 			init->rx.sb_cq_index,
4619 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4620 			hc_usec);
4621 	}
4622 
4623 	/* Set CDU context validation values */
4624 	for (cos = 0; cos < o->max_cos; cos++) {
4625 		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4626 				 o->cids[cos], cos);
4627 		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4628 		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4629 	}
4630 
	/* As no ramrod is sent, complete the command immediately */
4632 	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4633 
4634 	mmiowb();
4635 	smp_mb();
4636 
4637 	return 0;
4638 }
4639 
4640 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4641 					struct bnx2x_queue_state_params *params)
4642 {
4643 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4644 	struct client_init_ramrod_data *rdata =
4645 		(struct client_init_ramrod_data *)o->rdata;
4646 	dma_addr_t data_mapping = o->rdata_mapping;
4647 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4648 
4649 	/* Clear the ramrod data */
4650 	memset(rdata, 0, sizeof(*rdata));
4651 
4652 	/* Fill the ramrod data */
4653 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4654 
	/* No need for an explicit memory barrier here: bnx2x_sp_post()
	 * contains a full memory barrier that orders the write to the
	 * SPQ element against the update of the SPQ producer.
	 */
4662 
4663 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4664 			     U64_HI(data_mapping),
4665 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4666 }
4667 
4668 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4669 					struct bnx2x_queue_state_params *params)
4670 {
4671 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4672 	struct client_init_ramrod_data *rdata =
4673 		(struct client_init_ramrod_data *)o->rdata;
4674 	dma_addr_t data_mapping = o->rdata_mapping;
4675 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4676 
4677 	/* Clear the ramrod data */
4678 	memset(rdata, 0, sizeof(*rdata));
4679 
4680 	/* Fill the ramrod data */
4681 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4682 	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4683 
	/* No need for an explicit memory barrier here: bnx2x_sp_post()
	 * contains a full memory barrier that orders the write to the
	 * SPQ element against the update of the SPQ producer.
	 */
4691 
4692 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4693 			     U64_HI(data_mapping),
4694 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4695 }
4696 
4697 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4698 				  struct bnx2x_queue_state_params *params)
4699 {
4700 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4701 	struct tx_queue_init_ramrod_data *rdata =
4702 		(struct tx_queue_init_ramrod_data *)o->rdata;
4703 	dma_addr_t data_mapping = o->rdata_mapping;
4704 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4705 	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4706 		&params->params.tx_only;
4707 	u8 cid_index = tx_only_params->cid_index;
4710 	if (cid_index >= o->max_cos) {
4711 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4712 			  o->cl_id, cid_index);
4713 		return -EINVAL;
4714 	}
4715 
4716 	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4717 			 tx_only_params->gen_params.cos,
4718 			 tx_only_params->gen_params.spcl_id);
4719 
4720 	/* Clear the ramrod data */
4721 	memset(rdata, 0, sizeof(*rdata));
4722 
4723 	/* Fill the ramrod data */
4724 	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4725 
4726 	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4727 			 o->cids[cid_index], rdata->general.client_id,
4728 			 rdata->general.sp_client_id, rdata->general.cos);
4729 
	/* No need for an explicit memory barrier here: bnx2x_sp_post()
	 * contains a full memory barrier that orders the write to the
	 * SPQ element against the update of the SPQ producer.
	 */
4737 
4738 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4739 			     U64_HI(data_mapping),
4740 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4741 }
4742 
4743 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4744 				     struct bnx2x_queue_sp_obj *obj,
4745 				     struct bnx2x_queue_update_params *params,
4746 				     struct client_update_ramrod_data *data)
4747 {
4748 	/* Client ID of the client to update */
4749 	data->client_id = obj->cl_id;
4750 
4751 	/* Function ID of the client to update */
4752 	data->func_id = obj->func_id;
4753 
4754 	/* Default VLAN value */
4755 	data->default_vlan = cpu_to_le16(params->def_vlan);
4756 
4757 	/* Inner VLAN stripping */
4758 	data->inner_vlan_removal_enable_flg =
4759 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4760 	data->inner_vlan_removal_change_flg =
4761 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4762 			 &params->update_flags);
4763 
	/* Outer VLAN stripping */
4765 	data->outer_vlan_removal_enable_flg =
4766 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4767 	data->outer_vlan_removal_change_flg =
4768 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4769 			 &params->update_flags);
4770 
4771 	/* Drop packets that have source MAC that doesn't belong to this
4772 	 * Queue.
4773 	 */
4774 	data->anti_spoofing_enable_flg =
4775 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4776 	data->anti_spoofing_change_flg =
4777 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4778 
4779 	/* Activate/Deactivate */
4780 	data->activate_flg =
4781 		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4782 	data->activate_change_flg =
4783 		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4784 
4785 	/* Enable default VLAN */
4786 	data->default_vlan_enable_flg =
4787 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4788 	data->default_vlan_change_flg =
4789 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4790 			 &params->update_flags);
4791 
4792 	/* silent vlan removal */
4793 	data->silent_vlan_change_flg =
4794 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4795 			 &params->update_flags);
4796 	data->silent_vlan_removal_flg =
4797 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4798 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4799 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4800 }
4801 
4802 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4803 				      struct bnx2x_queue_state_params *params)
4804 {
4805 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4806 	struct client_update_ramrod_data *rdata =
4807 		(struct client_update_ramrod_data *)o->rdata;
4808 	dma_addr_t data_mapping = o->rdata_mapping;
4809 	struct bnx2x_queue_update_params *update_params =
4810 		&params->params.update;
4811 	u8 cid_index = update_params->cid_index;
4812 
4813 	if (cid_index >= o->max_cos) {
4814 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4815 			  o->cl_id, cid_index);
4816 		return -EINVAL;
4817 	}
4820 	/* Clear the ramrod data */
4821 	memset(rdata, 0, sizeof(*rdata));
4822 
4823 	/* Fill the ramrod data */
4824 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4825 
	/* No need for an explicit memory barrier here: bnx2x_sp_post()
	 * contains a full memory barrier that orders the write to the
	 * SPQ element against the update of the SPQ producer.
	 */
4833 
4834 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4835 			     o->cids[cid_index], U64_HI(data_mapping),
4836 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4837 }
4838 
4839 /**
4840  * bnx2x_q_send_deactivate - send DEACTIVATE command
4841  *
4842  * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
4846  */
4847 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4848 					struct bnx2x_queue_state_params *params)
4849 {
4850 	struct bnx2x_queue_update_params *update = &params->params.update;
4851 
4852 	memset(update, 0, sizeof(*update));
4853 
4854 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4855 
4856 	return bnx2x_q_send_update(bp, params);
4857 }
4858 
4859 /**
4860  * bnx2x_q_send_activate - send ACTIVATE command
4861  *
4862  * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
4866  */
4867 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4868 					struct bnx2x_queue_state_params *params)
4869 {
4870 	struct bnx2x_queue_update_params *update = &params->params.update;
4871 
4872 	memset(update, 0, sizeof(*update));
4873 
4874 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4875 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4876 
4877 	return bnx2x_q_send_update(bp, params);
4878 }
4879 
4880 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4881 					struct bnx2x_queue_state_params *params)
4882 {
4883 	/* TODO: Not implemented yet. */
4884 	return -1;
4885 }
4886 
4887 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4888 				    struct bnx2x_queue_state_params *params)
4889 {
4890 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4891 
4892 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4893 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4894 			     ETH_CONNECTION_TYPE);
4895 }
4896 
4897 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4898 				       struct bnx2x_queue_state_params *params)
4899 {
4900 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4901 	u8 cid_idx = params->params.cfc_del.cid_index;
4902 
4903 	if (cid_idx >= o->max_cos) {
4904 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4905 			  o->cl_id, cid_idx);
4906 		return -EINVAL;
4907 	}
4908 
4909 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4910 			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4911 }
4912 
4913 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4914 					struct bnx2x_queue_state_params *params)
4915 {
4916 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4917 	u8 cid_index = params->params.terminate.cid_index;
4918 
4919 	if (cid_index >= o->max_cos) {
4920 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4921 			  o->cl_id, cid_index);
4922 		return -EINVAL;
4923 	}
4924 
4925 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4926 			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4927 }
4928 
4929 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4930 				     struct bnx2x_queue_state_params *params)
4931 {
4932 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4933 
4934 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4935 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4936 			     ETH_CONNECTION_TYPE);
4937 }
4938 
4939 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4940 					struct bnx2x_queue_state_params *params)
4941 {
4942 	switch (params->cmd) {
4943 	case BNX2X_Q_CMD_INIT:
4944 		return bnx2x_q_init(bp, params);
4945 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4946 		return bnx2x_q_send_setup_tx_only(bp, params);
4947 	case BNX2X_Q_CMD_DEACTIVATE:
4948 		return bnx2x_q_send_deactivate(bp, params);
4949 	case BNX2X_Q_CMD_ACTIVATE:
4950 		return bnx2x_q_send_activate(bp, params);
4951 	case BNX2X_Q_CMD_UPDATE:
4952 		return bnx2x_q_send_update(bp, params);
4953 	case BNX2X_Q_CMD_UPDATE_TPA:
4954 		return bnx2x_q_send_update_tpa(bp, params);
4955 	case BNX2X_Q_CMD_HALT:
4956 		return bnx2x_q_send_halt(bp, params);
4957 	case BNX2X_Q_CMD_CFC_DEL:
4958 		return bnx2x_q_send_cfc_del(bp, params);
4959 	case BNX2X_Q_CMD_TERMINATE:
4960 		return bnx2x_q_send_terminate(bp, params);
4961 	case BNX2X_Q_CMD_EMPTY:
4962 		return bnx2x_q_send_empty(bp, params);
4963 	default:
4964 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4965 		return -EINVAL;
4966 	}
4967 }
4968 
4969 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4970 				    struct bnx2x_queue_state_params *params)
4971 {
4972 	switch (params->cmd) {
4973 	case BNX2X_Q_CMD_SETUP:
4974 		return bnx2x_q_send_setup_e1x(bp, params);
4975 	case BNX2X_Q_CMD_INIT:
4976 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4977 	case BNX2X_Q_CMD_DEACTIVATE:
4978 	case BNX2X_Q_CMD_ACTIVATE:
4979 	case BNX2X_Q_CMD_UPDATE:
4980 	case BNX2X_Q_CMD_UPDATE_TPA:
4981 	case BNX2X_Q_CMD_HALT:
4982 	case BNX2X_Q_CMD_CFC_DEL:
4983 	case BNX2X_Q_CMD_TERMINATE:
4984 	case BNX2X_Q_CMD_EMPTY:
4985 		return bnx2x_queue_send_cmd_cmn(bp, params);
4986 	default:
4987 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4988 		return -EINVAL;
4989 	}
4990 }
4991 
4992 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4993 				   struct bnx2x_queue_state_params *params)
4994 {
4995 	switch (params->cmd) {
4996 	case BNX2X_Q_CMD_SETUP:
4997 		return bnx2x_q_send_setup_e2(bp, params);
4998 	case BNX2X_Q_CMD_INIT:
4999 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
5000 	case BNX2X_Q_CMD_DEACTIVATE:
5001 	case BNX2X_Q_CMD_ACTIVATE:
5002 	case BNX2X_Q_CMD_UPDATE:
5003 	case BNX2X_Q_CMD_UPDATE_TPA:
5004 	case BNX2X_Q_CMD_HALT:
5005 	case BNX2X_Q_CMD_CFC_DEL:
5006 	case BNX2X_Q_CMD_TERMINATE:
5007 	case BNX2X_Q_CMD_EMPTY:
5008 		return bnx2x_queue_send_cmd_cmn(bp, params);
5009 	default:
5010 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5011 		return -EINVAL;
5012 	}
5013 }
5014 
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:		device handle
 * @o:		queue state object
 * @params:	queue state parameters
 *
 * (not a Forwarding queue)
 * It both checks if the requested command is legal in the current
 * state and, if it is, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * Returns 0 if the requested command is a legal transition,
 *         -EINVAL otherwise.
 */
5031 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5032 				      struct bnx2x_queue_sp_obj *o,
5033 				      struct bnx2x_queue_state_params *params)
5034 {
5035 	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5036 	enum bnx2x_queue_cmd cmd = params->cmd;
5037 	struct bnx2x_queue_update_params *update_params =
5038 		 &params->params.update;
5039 	u8 next_tx_only = o->num_tx_only;
5040 
	/* Forget all pending-for-completion commands if a driver-only state
	 * transition has been requested.
	 */
5045 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5046 		o->pending = 0;
5047 		o->next_state = BNX2X_Q_STATE_MAX;
5048 	}
5049 
5050 	/*
5051 	 * Don't allow a next state transition if we are in the middle of
5052 	 * the previous one.
5053 	 */
5054 	if (o->pending) {
5055 		BNX2X_ERR("Blocking transition since pending was %lx\n",
5056 			  o->pending);
5057 		return -EBUSY;
5058 	}
5059 
5060 	switch (state) {
5061 	case BNX2X_Q_STATE_RESET:
5062 		if (cmd == BNX2X_Q_CMD_INIT)
5063 			next_state = BNX2X_Q_STATE_INITIALIZED;
5064 
5065 		break;
5066 	case BNX2X_Q_STATE_INITIALIZED:
5067 		if (cmd == BNX2X_Q_CMD_SETUP) {
5068 			if (test_bit(BNX2X_Q_FLG_ACTIVE,
5069 				     &params->params.setup.flags))
5070 				next_state = BNX2X_Q_STATE_ACTIVE;
5071 			else
5072 				next_state = BNX2X_Q_STATE_INACTIVE;
5073 		}
5074 
5075 		break;
5076 	case BNX2X_Q_STATE_ACTIVE:
5077 		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5078 			next_state = BNX2X_Q_STATE_INACTIVE;
5079 
5080 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5081 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5082 			next_state = BNX2X_Q_STATE_ACTIVE;
5083 
5084 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5085 			next_state = BNX2X_Q_STATE_MULTI_COS;
5086 			next_tx_only = 1;
5087 		}
5088 
5089 		else if (cmd == BNX2X_Q_CMD_HALT)
5090 			next_state = BNX2X_Q_STATE_STOPPED;
5091 
5092 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5093 			/* If "active" state change is requested, update the
5094 			 *  state accordingly.
5095 			 */
5096 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5097 				     &update_params->update_flags) &&
5098 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5099 				      &update_params->update_flags))
5100 				next_state = BNX2X_Q_STATE_INACTIVE;
5101 			else
5102 				next_state = BNX2X_Q_STATE_ACTIVE;
5103 		}
5104 
5105 		break;
5106 	case BNX2X_Q_STATE_MULTI_COS:
5107 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5108 			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5109 
5110 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5111 			next_state = BNX2X_Q_STATE_MULTI_COS;
5112 			next_tx_only = o->num_tx_only + 1;
5113 		}
5114 
5115 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5116 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5117 			next_state = BNX2X_Q_STATE_MULTI_COS;
5118 
5119 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5120 			/* If "active" state change is requested, update the
5121 			 *  state accordingly.
5122 			 */
5123 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5124 				     &update_params->update_flags) &&
5125 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5126 				      &update_params->update_flags))
5127 				next_state = BNX2X_Q_STATE_INACTIVE;
5128 			else
5129 				next_state = BNX2X_Q_STATE_MULTI_COS;
5130 		}
5131 
5132 		break;
5133 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5134 		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5135 			next_tx_only = o->num_tx_only - 1;
5136 			if (next_tx_only == 0)
5137 				next_state = BNX2X_Q_STATE_ACTIVE;
5138 			else
5139 				next_state = BNX2X_Q_STATE_MULTI_COS;
5140 		}
5141 
5142 		break;
5143 	case BNX2X_Q_STATE_INACTIVE:
5144 		if (cmd == BNX2X_Q_CMD_ACTIVATE)
5145 			next_state = BNX2X_Q_STATE_ACTIVE;
5146 
5147 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5148 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5149 			next_state = BNX2X_Q_STATE_INACTIVE;
5150 
5151 		else if (cmd == BNX2X_Q_CMD_HALT)
5152 			next_state = BNX2X_Q_STATE_STOPPED;
5153 
5154 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5155 			/* If "active" state change is requested, update the
5156 			 * state accordingly.
5157 			 */
5158 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5159 				     &update_params->update_flags) &&
5160 			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5161 				     &update_params->update_flags)){
5162 				if (o->num_tx_only == 0)
5163 					next_state = BNX2X_Q_STATE_ACTIVE;
5164 				else /* tx only queues exist for this queue */
5165 					next_state = BNX2X_Q_STATE_MULTI_COS;
5166 			} else
5167 				next_state = BNX2X_Q_STATE_INACTIVE;
5168 		}
5169 
5170 		break;
5171 	case BNX2X_Q_STATE_STOPPED:
5172 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5173 			next_state = BNX2X_Q_STATE_TERMINATED;
5174 
5175 		break;
5176 	case BNX2X_Q_STATE_TERMINATED:
5177 		if (cmd == BNX2X_Q_CMD_CFC_DEL)
5178 			next_state = BNX2X_Q_STATE_RESET;
5179 
5180 		break;
5181 	default:
5182 		BNX2X_ERR("Illegal state: %d\n", state);
5183 	}
5184 
5185 	/* Transition is assured */
5186 	if (next_state != BNX2X_Q_STATE_MAX) {
5187 		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5188 				 state, cmd, next_state);
5189 		o->next_state = next_state;
5190 		o->next_tx_only = next_tx_only;
5191 		return 0;
5192 	}
5193 
5194 	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5195 
5196 	return -EINVAL;
5197 }
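
/* Summary of the legal transitions checked above (regular queues,
 * illustrative):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE --DEACTIVATE--> INACTIVE
 *	INACTIVE --ACTIVATE--> ACTIVE (MULTI_COS if tx-only queues exist)
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS
 *	ACTIVE or INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 *
 * ACTIVATE/DEACTIVATE are UPDATE sub-commands; EMPTY and UPDATE_TPA never
 * change the state.
 */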
5198 
5199 void bnx2x_init_queue_obj(struct bnx2x *bp,
5200 			  struct bnx2x_queue_sp_obj *obj,
5201 			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5202 			  void *rdata,
5203 			  dma_addr_t rdata_mapping, unsigned long type)
5204 {
5205 	memset(obj, 0, sizeof(*obj));
5206 
5207 	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5208 	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5209 
5210 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5211 	obj->max_cos = cid_cnt;
5212 	obj->cl_id = cl_id;
5213 	obj->func_id = func_id;
5214 	obj->rdata = rdata;
5215 	obj->rdata_mapping = rdata_mapping;
5216 	obj->type = type;
5217 	obj->next_state = BNX2X_Q_STATE_MAX;
5218 
5219 	if (CHIP_IS_E1x(bp))
5220 		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5221 	else
5222 		obj->send_cmd = bnx2x_queue_send_cmd_e2;
5223 
5224 	obj->check_transition = bnx2x_queue_chk_transition;
5225 
5226 	obj->complete_cmd = bnx2x_queue_comp_cmd;
5227 	obj->wait_comp = bnx2x_queue_wait_comp;
5228 	obj->set_pending = bnx2x_queue_set_pending;
5229 }
5230 
/* Return a queue object's logical state */
5232 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5233 			       struct bnx2x_queue_sp_obj *obj)
5234 {
5235 	switch (obj->state) {
5236 	case BNX2X_Q_STATE_ACTIVE:
5237 	case BNX2X_Q_STATE_MULTI_COS:
5238 		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5239 	case BNX2X_Q_STATE_RESET:
5240 	case BNX2X_Q_STATE_INITIALIZED:
5241 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5242 	case BNX2X_Q_STATE_INACTIVE:
5243 	case BNX2X_Q_STATE_STOPPED:
5244 	case BNX2X_Q_STATE_TERMINATED:
5245 	case BNX2X_Q_STATE_FLRED:
5246 		return BNX2X_Q_LOGICAL_STATE_STOPPED;
5247 	default:
5248 		return -EINVAL;
5249 	}
5250 }
5251 
5252 /********************** Function state object *********************************/
5253 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5254 					   struct bnx2x_func_sp_obj *o)
5255 {
	/* In the middle of a transaction - return INVALID state */
5257 	if (o->pending)
5258 		return BNX2X_F_STATE_MAX;
5259 
	/* Ensure the order of reading o->pending and o->state:
	 * o->pending should be read first.
	 */
5264 	rmb();
5265 
5266 	return o->state;
5267 }
5268 
5269 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5270 				struct bnx2x_func_sp_obj *o,
5271 				enum bnx2x_func_cmd cmd)
5272 {
5273 	return bnx2x_state_wait(bp, cmd, &o->pending);
5274 }
5275 
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:		function state object
 * @cmd:	command that has completed
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
5286 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5287 					       struct bnx2x_func_sp_obj *o,
5288 					       enum bnx2x_func_cmd cmd)
5289 {
5290 	unsigned long cur_pending = o->pending;
5291 
5292 	if (!test_and_clear_bit(cmd, &cur_pending)) {
5293 		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5294 			  cmd, BP_FUNC(bp), o->state,
5295 			  cur_pending, o->next_state);
5296 		return -EINVAL;
5297 	}
5298 
5299 	DP(BNX2X_MSG_SP,
5300 	   "Completing command %d for func %d, setting state to %d\n",
5301 	   cmd, BP_FUNC(bp), o->next_state);
5302 
5303 	o->state = o->next_state;
5304 	o->next_state = BNX2X_F_STATE_MAX;
5305 
5306 	/* It's important that o->state and o->next_state are
5307 	 * updated before o->pending.
5308 	 */
5309 	wmb();
5310 
5311 	clear_bit(cmd, &o->pending);
5312 	smp_mb__after_clear_bit();
5313 
5314 	return 0;
5315 }
5316 
5317 /**
5318  * bnx2x_func_comp_cmd - complete the state change command
5319  *
5320  * @bp:		device handle
 * @o:		function state object
 * @cmd:	command to complete
5323  *
5324  * Checks that the arrived completion is expected.
5325  */
5326 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5327 			       struct bnx2x_func_sp_obj *o,
5328 			       enum bnx2x_func_cmd cmd)
5329 {
	/* Completing the state machine part first also verifies that
	 * the arrived completion is a legal one.
	 */
	return bnx2x_func_state_change_comp(bp, o, cmd);
5335 }
5336 
5337 /**
5338  * bnx2x_func_chk_transition - perform function state machine transition
5339  *
5340  * @bp:		device handle
 * @o:		function state object
 * @params:	state change parameters, including the requested command
 *
 * Checks whether the requested command is legal in the current state
 * and, if it is, sets `next_state' in the object; the completion flow
 * will later use it to set the object's `state'.
5348  *
5349  * returns 0 if a requested command is a legal transition,
5350  *         -EINVAL otherwise.
5351  */
5352 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5353 				     struct bnx2x_func_sp_obj *o,
5354 				     struct bnx2x_func_state_params *params)
5355 {
5356 	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5357 	enum bnx2x_func_cmd cmd = params->cmd;
5358 
	/*
	 * Forget all pending-for-completion commands if a driver-only state
	 * transition has been requested.
	 */
5363 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5364 		o->pending = 0;
5365 		o->next_state = BNX2X_F_STATE_MAX;
5366 	}
5367 
5368 	/*
5369 	 * Don't allow a next state transition if we are in the middle of
5370 	 * the previous one.
5371 	 */
5372 	if (o->pending)
5373 		return -EBUSY;
5374 
5375 	switch (state) {
5376 	case BNX2X_F_STATE_RESET:
5377 		if (cmd == BNX2X_F_CMD_HW_INIT)
5378 			next_state = BNX2X_F_STATE_INITIALIZED;
5379 
5380 		break;
5381 	case BNX2X_F_STATE_INITIALIZED:
5382 		if (cmd == BNX2X_F_CMD_START)
5383 			next_state = BNX2X_F_STATE_STARTED;
5384 
5385 		else if (cmd == BNX2X_F_CMD_HW_RESET)
5386 			next_state = BNX2X_F_STATE_RESET;
5387 
5388 		break;
5389 	case BNX2X_F_STATE_STARTED:
5390 		if (cmd == BNX2X_F_CMD_STOP)
5391 			next_state = BNX2X_F_STATE_INITIALIZED;
		/* AFEX ramrods can be sent only in the started state, and
		 * only if a function_stop ramrod completion is not pending;
		 * for these events the next state remains STARTED.
		 */
5396 		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5397 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5398 			next_state = BNX2X_F_STATE_STARTED;
5399 
5400 		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5401 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5402 			next_state = BNX2X_F_STATE_STARTED;
5403 
5404 		/* Switch_update ramrod can be sent in either started or
5405 		 * tx_stopped state, and it doesn't change the state.
5406 		 */
5407 		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5408 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5409 			next_state = BNX2X_F_STATE_STARTED;
5410 
5411 		else if (cmd == BNX2X_F_CMD_TX_STOP)
5412 			next_state = BNX2X_F_STATE_TX_STOPPED;
5413 
5414 		break;
5415 	case BNX2X_F_STATE_TX_STOPPED:
5416 		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5417 		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5418 			next_state = BNX2X_F_STATE_TX_STOPPED;
5419 
5420 		else if (cmd == BNX2X_F_CMD_TX_START)
5421 			next_state = BNX2X_F_STATE_STARTED;
5422 
5423 		break;
5424 	default:
5425 		BNX2X_ERR("Unknown state: %d\n", state);
5426 	}
5427 
	/* Transition is allowed */
5429 	if (next_state != BNX2X_F_STATE_MAX) {
5430 		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5431 				 state, cmd, next_state);
5432 		o->next_state = next_state;
5433 		return 0;
5434 	}
5435 
5436 	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5437 			 state, cmd);
5438 
5439 	return -EINVAL;
5440 }
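
/* Transition sketch (for illustration only; `o' is an already initialized
 * function object): quiescing Tx from the STARTED state maps through the
 * table above as STARTED -(TX_STOP)-> TX_STOPPED:
 *
 *	struct bnx2x_func_state_params params = {0};
 *
 *	params.f_obj = o;
 *	params.cmd = BNX2X_F_CMD_TX_STOP;
 *
 * With o->state == BNX2X_F_STATE_STARTED and o->pending == 0,
 * bnx2x_func_chk_transition(bp, o, &params) returns 0 and sets
 * o->next_state = BNX2X_F_STATE_TX_STOPPED.
 */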
5441 
5442 /**
5443  * bnx2x_func_init_func - performs HW init at function stage
5444  *
5445  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5447  *
5448  * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
5450  * HW blocks.
5451  */
5452 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5453 				       const struct bnx2x_func_sp_drv_ops *drv)
5454 {
5455 	return drv->init_hw_func(bp);
5456 }
5457 
5458 /**
5459  * bnx2x_func_init_port - performs HW init at port stage
5460  *
5461  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5463  *
5464  * Init HW when the current phase is
5465  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5466  * FUNCTION-only HW blocks.
 */
5469 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5470 				       const struct bnx2x_func_sp_drv_ops *drv)
5471 {
5472 	int rc = drv->init_hw_port(bp);
5473 	if (rc)
5474 		return rc;
5475 
5476 	return bnx2x_func_init_func(bp, drv);
5477 }
5478 
5479 /**
5480  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5481  *
5482  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5484  *
5485  * Init HW when the current phase is
5486  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5487  * PORT-only and FUNCTION-only HW blocks.
5488  */
5489 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5490 					const struct bnx2x_func_sp_drv_ops *drv)
5491 {
5492 	int rc = drv->init_hw_cmn_chip(bp);
5493 	if (rc)
5494 		return rc;
5495 
5496 	return bnx2x_func_init_port(bp, drv);
5497 }
5498 
5499 /**
5500  * bnx2x_func_init_cmn - performs HW init at common stage
5501  *
5502  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5504  *
5505  * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5507  * PORT-only and FUNCTION-only HW blocks.
5508  */
5509 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5510 				      const struct bnx2x_func_sp_drv_ops *drv)
5511 {
5512 	int rc = drv->init_hw_cmn(bp);
5513 	if (rc)
5514 		return rc;
5515 
5516 	return bnx2x_func_init_port(bp, drv);
5517 }
5518 
5519 static int bnx2x_func_hw_init(struct bnx2x *bp,
5520 			      struct bnx2x_func_state_params *params)
5521 {
5522 	u32 load_code = params->params.hw_init.load_phase;
5523 	struct bnx2x_func_sp_obj *o = params->f_obj;
5524 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5525 	int rc = 0;
5526 
5527 	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5528 			 BP_ABS_FUNC(bp), load_code);
5529 
5530 	/* Prepare buffers for unzipping the FW */
5531 	rc = drv->gunzip_init(bp);
5532 	if (rc)
5533 		return rc;
5534 
5535 	/* Prepare FW */
5536 	rc = drv->init_fw(bp);
5537 	if (rc) {
5538 		BNX2X_ERR("Error loading firmware\n");
5539 		goto init_err;
5540 	}
5541 
	/* Handle the beginning of COMMON_XXX phases separately... */
5543 	switch (load_code) {
5544 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5545 		rc = bnx2x_func_init_cmn_chip(bp, drv);
5546 		if (rc)
5547 			goto init_err;
5548 
5549 		break;
5550 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5551 		rc = bnx2x_func_init_cmn(bp, drv);
5552 		if (rc)
5553 			goto init_err;
5554 
5555 		break;
5556 	case FW_MSG_CODE_DRV_LOAD_PORT:
5557 		rc = bnx2x_func_init_port(bp, drv);
5558 		if (rc)
5559 			goto init_err;
5560 
5561 		break;
5562 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5563 		rc = bnx2x_func_init_func(bp, drv);
5564 		if (rc)
5565 			goto init_err;
5566 
5567 		break;
5568 	default:
5569 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5570 		rc = -EINVAL;
5571 	}
5572 
5573 init_err:
5574 	drv->gunzip_end(bp);
5575 
	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
5579 	if (!rc)
5580 		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5581 
5582 	return rc;
5583 }
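
/* Invocation sketch (for illustration only; assumes the usual bp->func_obj
 * instance and a `load_code' obtained from the MCP during the load flow):
 * HW init is driven through the generic state machine rather than by
 * calling the helpers above directly:
 *
 *	struct bnx2x_func_state_params func_params = {0};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = load_code;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */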
5584 
5585 /**
5586  * bnx2x_func_reset_func - reset HW at function stage
5587  *
5588  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5590  *
5591  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5592  * FUNCTION-only HW blocks.
5593  */
5594 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5595 					const struct bnx2x_func_sp_drv_ops *drv)
5596 {
5597 	drv->reset_hw_func(bp);
5598 }
5599 
5600 /**
 * bnx2x_func_reset_port - reset HW at port stage
5602  *
5603  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5605  *
5606  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5607  * FUNCTION-only and PORT-only HW blocks.
5608  *
5609  *                 !!!IMPORTANT!!!
5610  *
 * It's important to call reset_port() before reset_func(): the last thing
 * reset_func() does is pf_disable(), which shuts down PGLUE_B and thus
 * makes any further DMAE transactions impossible.
5614  */
5615 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5616 					const struct bnx2x_func_sp_drv_ops *drv)
5617 {
5618 	drv->reset_hw_port(bp);
5619 	bnx2x_func_reset_func(bp, drv);
5620 }
5621 
5622 /**
 * bnx2x_func_reset_cmn - reset HW at common stage
5624  *
5625  * @bp:		device handle
 * @drv:	driver-specific slow path operations
5627  *
5628  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5629  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5630  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5631  */
5632 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5633 					const struct bnx2x_func_sp_drv_ops *drv)
5634 {
5635 	bnx2x_func_reset_port(bp, drv);
5636 	drv->reset_hw_cmn(bp);
5637 }
5638 
5640 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5641 				      struct bnx2x_func_state_params *params)
5642 {
5643 	u32 reset_phase = params->params.hw_reset.reset_phase;
5644 	struct bnx2x_func_sp_obj *o = params->f_obj;
5645 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5646 
5647 	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5648 			 reset_phase);
5649 
5650 	switch (reset_phase) {
5651 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5652 		bnx2x_func_reset_cmn(bp, drv);
5653 		break;
5654 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5655 		bnx2x_func_reset_port(bp, drv);
5656 		break;
5657 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5658 		bnx2x_func_reset_func(bp, drv);
5659 		break;
5660 	default:
5661 		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5662 			   reset_phase);
5663 		break;
5664 	}
5665 
	/* Complete the command immediately: no ramrods have been sent. */
5667 	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5668 
5669 	return 0;
5670 }
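
/* Teardown sketch (for illustration only; assumes a `reset_code' reported
 * by the MCP during unload): the reset phase selects how deep the reset
 * goes, mirroring the hw_init flow:
 *
 *	struct bnx2x_func_state_params func_params = {0};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_RESET;
 *	func_params.params.hw_reset.reset_phase = reset_code;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */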
5671 
5672 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5673 					struct bnx2x_func_state_params *params)
5674 {
5675 	struct bnx2x_func_sp_obj *o = params->f_obj;
5676 	struct function_start_data *rdata =
5677 		(struct function_start_data *)o->rdata;
5678 	dma_addr_t data_mapping = o->rdata_mapping;
5679 	struct bnx2x_func_start_params *start_params = &params->params.start;
5680 
5681 	memset(rdata, 0, sizeof(*rdata));
5682 
5683 	/* Fill the ramrod data with provided parameters */
5684 	rdata->function_mode	= (u8)start_params->mf_mode;
5685 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
5686 	rdata->path_id		= BP_PATH(bp);
5687 	rdata->network_cos_mode	= start_params->network_cos_mode;
5688 	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
5689 	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;
5690 
	/* No need for an explicit memory barrier here: the write of the SPQ
	 * element only needs to be ordered against the SPQ producer update,
	 * and the full memory barrier inside bnx2x_sp_post() already
	 * enforces that.
	 */
5697 
5698 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5699 			     U64_HI(data_mapping),
5700 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5701 }
5702 
5703 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5704 					struct bnx2x_func_state_params *params)
5705 {
5706 	struct bnx2x_func_sp_obj *o = params->f_obj;
5707 	struct function_update_data *rdata =
5708 		(struct function_update_data *)o->rdata;
5709 	dma_addr_t data_mapping = o->rdata_mapping;
5710 	struct bnx2x_func_switch_update_params *switch_update_params =
5711 		&params->params.switch_update;
5712 
5713 	memset(rdata, 0, sizeof(*rdata));
5714 
5715 	/* Fill the ramrod data with provided parameters */
5716 	rdata->tx_switch_suspend_change_flg = 1;
5717 	rdata->tx_switch_suspend = switch_update_params->suspend;
5718 	rdata->echo = SWITCH_UPDATE;
5719 
5720 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5721 			     U64_HI(data_mapping),
5722 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5723 }
5724 
5725 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5726 					 struct bnx2x_func_state_params *params)
5727 {
5728 	struct bnx2x_func_sp_obj *o = params->f_obj;
5729 	struct function_update_data *rdata =
5730 		(struct function_update_data *)o->afex_rdata;
5731 	dma_addr_t data_mapping = o->afex_rdata_mapping;
5732 	struct bnx2x_func_afex_update_params *afex_update_params =
5733 		&params->params.afex_update;
5734 
5735 	memset(rdata, 0, sizeof(*rdata));
5736 
5737 	/* Fill the ramrod data with provided parameters */
5738 	rdata->vif_id_change_flg = 1;
5739 	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5740 	rdata->afex_default_vlan_change_flg = 1;
5741 	rdata->afex_default_vlan =
5742 		cpu_to_le16(afex_update_params->afex_default_vlan);
5743 	rdata->allowed_priorities_change_flg = 1;
5744 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
5745 	rdata->echo = AFEX_UPDATE;
5746 
	/* No need for an explicit memory barrier here: the write of the SPQ
	 * element only needs to be ordered against the SPQ producer update,
	 * and the full memory barrier inside bnx2x_sp_post() already
	 * enforces that.
	 */
5753 	DP(BNX2X_MSG_SP,
5754 	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5755 	   rdata->vif_id,
5756 	   rdata->afex_default_vlan, rdata->allowed_priorities);
5757 
5758 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5759 			     U64_HI(data_mapping),
5760 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5761 }
5762 
5763 static
5764 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5765 					 struct bnx2x_func_state_params *params)
5766 {
5767 	struct bnx2x_func_sp_obj *o = params->f_obj;
5768 	struct afex_vif_list_ramrod_data *rdata =
5769 		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
5770 	struct bnx2x_func_afex_viflists_params *afex_vif_params =
5771 		&params->params.afex_viflists;
5772 	u64 *p_rdata = (u64 *)rdata;
5773 
5774 	memset(rdata, 0, sizeof(*rdata));
5775 
5776 	/* Fill the ramrod data with provided parameters */
5777 	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5778 	rdata->func_bit_map          = afex_vif_params->func_bit_map;
5779 	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5780 	rdata->func_to_clear         = afex_vif_params->func_to_clear;
5781 
	/* Send the sub-command type in the echo field */
5783 	rdata->echo = afex_vif_params->afex_vif_list_command;
5784 
	/* No need for an explicit memory barrier here: the write of the SPQ
	 * element only needs to be ordered against the SPQ producer update,
	 * and the full memory barrier inside bnx2x_sp_post() already
	 * enforces that.
	 */
5791 
5792 	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5793 	   rdata->afex_vif_list_command, rdata->vif_list_index,
5794 	   rdata->func_bit_map, rdata->func_to_clear);
5795 
	/* This ramrod carries its data directly in the SPQ element rather
	 * than through a DMA mapping.
	 */
5797 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5798 			     U64_HI(*p_rdata), U64_LO(*p_rdata),
5799 			     NONE_CONNECTION_TYPE);
5800 }
5801 
5802 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5803 				       struct bnx2x_func_state_params *params)
5804 {
5805 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5806 			     NONE_CONNECTION_TYPE);
5807 }
5808 
5809 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5810 				       struct bnx2x_func_state_params *params)
5811 {
5812 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5813 			     NONE_CONNECTION_TYPE);
5814 }

static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5816 				       struct bnx2x_func_state_params *params)
5817 {
5818 	struct bnx2x_func_sp_obj *o = params->f_obj;
5819 	struct flow_control_configuration *rdata =
5820 		(struct flow_control_configuration *)o->rdata;
5821 	dma_addr_t data_mapping = o->rdata_mapping;
5822 	struct bnx2x_func_tx_start_params *tx_start_params =
5823 		&params->params.tx_start;
5824 	int i;
5825 
5826 	memset(rdata, 0, sizeof(*rdata));
5827 
5828 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
5829 	rdata->dcb_version = tx_start_params->dcb_version;
5830 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5831 
5832 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5833 		rdata->traffic_type_to_priority_cos[i] =
5834 			tx_start_params->traffic_type_to_priority_cos[i];
5835 
5836 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5837 			     U64_HI(data_mapping),
5838 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5839 }
5840 
5841 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5842 			       struct bnx2x_func_state_params *params)
5843 {
5844 	switch (params->cmd) {
5845 	case BNX2X_F_CMD_HW_INIT:
5846 		return bnx2x_func_hw_init(bp, params);
5847 	case BNX2X_F_CMD_START:
5848 		return bnx2x_func_send_start(bp, params);
5849 	case BNX2X_F_CMD_STOP:
5850 		return bnx2x_func_send_stop(bp, params);
5851 	case BNX2X_F_CMD_HW_RESET:
5852 		return bnx2x_func_hw_reset(bp, params);
5853 	case BNX2X_F_CMD_AFEX_UPDATE:
5854 		return bnx2x_func_send_afex_update(bp, params);
5855 	case BNX2X_F_CMD_AFEX_VIFLISTS:
5856 		return bnx2x_func_send_afex_viflists(bp, params);
5857 	case BNX2X_F_CMD_TX_STOP:
5858 		return bnx2x_func_send_tx_stop(bp, params);
5859 	case BNX2X_F_CMD_TX_START:
5860 		return bnx2x_func_send_tx_start(bp, params);
5861 	case BNX2X_F_CMD_SWITCH_UPDATE:
5862 		return bnx2x_func_send_switch_update(bp, params);
5863 	default:
5864 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5865 		return -EINVAL;
5866 	}
5867 }
5868 
5869 void bnx2x_init_func_obj(struct bnx2x *bp,
5870 			 struct bnx2x_func_sp_obj *obj,
5871 			 void *rdata, dma_addr_t rdata_mapping,
5872 			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5873 			 struct bnx2x_func_sp_drv_ops *drv_iface)
5874 {
5875 	memset(obj, 0, sizeof(*obj));
5876 
5877 	mutex_init(&obj->one_pending_mutex);
5878 
5879 	obj->rdata = rdata;
5880 	obj->rdata_mapping = rdata_mapping;
5881 	obj->afex_rdata = afex_rdata;
5882 	obj->afex_rdata_mapping = afex_rdata_mapping;
5883 	obj->send_cmd = bnx2x_func_send_cmd;
5884 	obj->check_transition = bnx2x_func_chk_transition;
5885 	obj->complete_cmd = bnx2x_func_comp_cmd;
5886 	obj->wait_comp = bnx2x_func_wait_comp;
5887 
5888 	obj->drv = drv_iface;
5889 }
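
/* Setup sketch (the buffer names and the bnx2x_func_sp_drv ops table are
 * assumptions): the function object is normally initialized once at load
 * time with two DMA-coherent ramrod buffers and the driver's slow path
 * ops:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    bnx2x_sp(bp, func_afex_rdata),
 *			    bnx2x_sp_mapping(bp, func_afex_rdata),
 *			    &bnx2x_func_sp_drv);
 */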
5890 
5891 /**
5892  * bnx2x_func_state_change - perform Function state change transition
5893  *
5894  * @bp:		device handle
5895  * @params:	parameters to perform the transaction
5896  *
 * returns 0 in case of a successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion that is
5900  *         still pending (possible only if RAMROD_COMP_WAIT is
5901  *         not set in params->ramrod_flags for asynchronous
5902  *         commands).
5903  */
5904 int bnx2x_func_state_change(struct bnx2x *bp,
5905 			    struct bnx2x_func_state_params *params)
5906 {
5907 	struct bnx2x_func_sp_obj *o = params->f_obj;
5908 	int rc, cnt = 300;
5909 	enum bnx2x_func_cmd cmd = params->cmd;
5910 	unsigned long *pending = &o->pending;
5911 
5912 	mutex_lock(&o->one_pending_mutex);
5913 
5914 	/* Check that the requested transition is legal */
5915 	rc = o->check_transition(bp, o, params);
5916 	if ((rc == -EBUSY) &&
5917 	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5918 		while ((rc == -EBUSY) && (--cnt > 0)) {
5919 			mutex_unlock(&o->one_pending_mutex);
5920 			msleep(10);
5921 			mutex_lock(&o->one_pending_mutex);
5922 			rc = o->check_transition(bp, o, params);
5923 		}
5924 		if (rc == -EBUSY) {
5925 			mutex_unlock(&o->one_pending_mutex);
5926 			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5927 			return rc;
5928 		}
5929 	} else if (rc) {
5930 		mutex_unlock(&o->one_pending_mutex);
5931 		return rc;
5932 	}
5933 
5934 	/* Set "pending" bit */
5935 	set_bit(cmd, pending);
5936 
5937 	/* Don't send a command if only driver cleanup was requested */
5938 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5939 		bnx2x_func_state_change_comp(bp, o, cmd);
5940 		mutex_unlock(&o->one_pending_mutex);
5941 	} else {
5942 		/* Send a ramrod */
5943 		rc = o->send_cmd(bp, params);
5944 
5945 		mutex_unlock(&o->one_pending_mutex);
5946 
5947 		if (rc) {
5948 			o->next_state = BNX2X_F_STATE_MAX;
5949 			clear_bit(cmd, pending);
5950 			smp_mb__after_clear_bit();
5951 			return rc;
5952 		}
5953 
5954 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5955 			rc = o->wait_comp(bp, o, cmd);
5956 			if (rc)
5957 				return rc;
5958 
5959 			return 0;
5960 		}
5961 	}
5962 
5963 	return !!test_bit(cmd, pending);
5964 }
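
/* Usage sketch (for illustration only; the mf_mode source is an
 * assumption): a synchronous FUNCTION_START that blocks until the ramrod
 * completes:
 *
 *	struct bnx2x_func_state_params func_params = {0};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	func_params.params.start.mf_mode = bp->mf_mode;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */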
5965