1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
2e8f1cb50SMintz, Yuval  * Copyright (c) 2015-2017  QLogic Corporation
3fe56b9e6SYuval Mintz  *
4e8f1cb50SMintz, Yuval  * This software is available to you under a choice of one of two
5e8f1cb50SMintz, Yuval  * licenses.  You may choose to be licensed under the terms of the GNU
6e8f1cb50SMintz, Yuval  * General Public License (GPL) Version 2, available from the file
7e8f1cb50SMintz, Yuval  * COPYING in the main directory of this source tree, or the
8e8f1cb50SMintz, Yuval  * OpenIB.org BSD license below:
9e8f1cb50SMintz, Yuval  *
10e8f1cb50SMintz, Yuval  *     Redistribution and use in source and binary forms, with or
11e8f1cb50SMintz, Yuval  *     without modification, are permitted provided that the following
12e8f1cb50SMintz, Yuval  *     conditions are met:
13e8f1cb50SMintz, Yuval  *
14e8f1cb50SMintz, Yuval  *      - Redistributions of source code must retain the above
15e8f1cb50SMintz, Yuval  *        copyright notice, this list of conditions and the following
16e8f1cb50SMintz, Yuval  *        disclaimer.
17e8f1cb50SMintz, Yuval  *
18e8f1cb50SMintz, Yuval  *      - Redistributions in binary form must reproduce the above
19e8f1cb50SMintz, Yuval  *        copyright notice, this list of conditions and the following
20e8f1cb50SMintz, Yuval  *        disclaimer in the documentation and /or other materials
21e8f1cb50SMintz, Yuval  *        provided with the distribution.
22e8f1cb50SMintz, Yuval  *
23e8f1cb50SMintz, Yuval  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e8f1cb50SMintz, Yuval  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e8f1cb50SMintz, Yuval  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e8f1cb50SMintz, Yuval  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e8f1cb50SMintz, Yuval  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e8f1cb50SMintz, Yuval  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e8f1cb50SMintz, Yuval  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e8f1cb50SMintz, Yuval  * SOFTWARE.
31fe56b9e6SYuval Mintz  */
32fe56b9e6SYuval Mintz 
33fe56b9e6SYuval Mintz #include <linux/types.h>
34fe56b9e6SYuval Mintz #include <asm/byteorder.h>
35fe56b9e6SYuval Mintz #include <linux/io.h>
36fe56b9e6SYuval Mintz #include <linux/delay.h>
37fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
38fe56b9e6SYuval Mintz #include <linux/errno.h>
39fe56b9e6SYuval Mintz #include <linux/kernel.h>
40fe56b9e6SYuval Mintz #include <linux/mutex.h>
41fe56b9e6SYuval Mintz #include <linux/pci.h>
42fe56b9e6SYuval Mintz #include <linux/slab.h>
43fe56b9e6SYuval Mintz #include <linux/string.h>
44a91eb52aSYuval Mintz #include <linux/vmalloc.h>
45fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
46fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
47fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
48fe56b9e6SYuval Mintz #include "qed.h"
49fe56b9e6SYuval Mintz #include "qed_cxt.h"
5039651abdSSudarsana Reddy Kalluru #include "qed_dcbx.h"
51fe56b9e6SYuval Mintz #include "qed_dev_api.h"
521e128c81SArun Easi #include "qed_fcoe.h"
53fe56b9e6SYuval Mintz #include "qed_hsi.h"
54fe56b9e6SYuval Mintz #include "qed_hw.h"
55fe56b9e6SYuval Mintz #include "qed_init_ops.h"
56fe56b9e6SYuval Mintz #include "qed_int.h"
57fc831825SYuval Mintz #include "qed_iscsi.h"
580a7fb11cSYuval Mintz #include "qed_ll2.h"
59fe56b9e6SYuval Mintz #include "qed_mcp.h"
601d6cff4fSYuval Mintz #include "qed_ooo.h"
61fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
62fe56b9e6SYuval Mintz #include "qed_sp.h"
6332a47e72SYuval Mintz #include "qed_sriov.h"
640b55e27dSYuval Mintz #include "qed_vf.h"
65b71b9afdSKalderon, Michal #include "qed_rdma.h"
66fe56b9e6SYuval Mintz 
670caf5b26SWei Yongjun static DEFINE_SPINLOCK(qm_lock);
6839651abdSSudarsana Reddy Kalluru 
6936907cd5SAriel Elior /******************** Doorbell Recovery *******************/
7036907cd5SAriel Elior /* The doorbell recovery mechanism consists of a list of entries which represent
7136907cd5SAriel Elior  * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
7236907cd5SAriel Elior  * entity needs to register with the mechanism and provide the parameters
7336907cd5SAriel Elior  * describing its doorbell, including a location where last used doorbell data
7436907cd5SAriel Elior  * can be found. The doorbell execute function will traverse the list and
7536907cd5SAriel Elior  * doorbell all of the registered entries.
7636907cd5SAriel Elior  */
7736907cd5SAriel Elior struct qed_db_recovery_entry {
7836907cd5SAriel Elior 	struct list_head list_entry;
7936907cd5SAriel Elior 	void __iomem *db_addr;
8036907cd5SAriel Elior 	void *db_data;
8136907cd5SAriel Elior 	enum qed_db_rec_width db_width;
8236907cd5SAriel Elior 	enum qed_db_rec_space db_space;
8336907cd5SAriel Elior 	u8 hwfn_idx;
8436907cd5SAriel Elior };
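
/* Illustrative usage sketch (not part of the driver itself): a doorbelling
 * entity such as an L2 Tx queue would typically pair the add/del calls
 * around the lifetime of its doorbell data, e.g.:
 *
 *	rc = qed_db_recovery_add(cdev, txq->doorbell_addr, &txq->tx_db,
 *				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	...
 *	qed_db_recovery_del(cdev, txq->doorbell_addr, &txq->tx_db);
 *
 * txq->doorbell_addr and txq->tx_db are hypothetical caller-owned fields used
 * only for this sketch; the db_data pointer passed to qed_db_recovery_del()
 * must be the same one that was registered, since deletion is keyed on it.
 */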
8536907cd5SAriel Elior 
8636907cd5SAriel Elior /* Display a single doorbell recovery entry */
8736907cd5SAriel Elior static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
8836907cd5SAriel Elior 				     struct qed_db_recovery_entry *db_entry,
8936907cd5SAriel Elior 				     char *action)
9036907cd5SAriel Elior {
9136907cd5SAriel Elior 	DP_VERBOSE(p_hwfn,
9236907cd5SAriel Elior 		   QED_MSG_SPQ,
9336907cd5SAriel Elior 		   "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
9436907cd5SAriel Elior 		   action,
9536907cd5SAriel Elior 		   db_entry,
9636907cd5SAriel Elior 		   db_entry->db_addr,
9736907cd5SAriel Elior 		   db_entry->db_data,
9836907cd5SAriel Elior 		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
9936907cd5SAriel Elior 		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
10036907cd5SAriel Elior 		   db_entry->hwfn_idx);
10136907cd5SAriel Elior }
10236907cd5SAriel Elior 
10336907cd5SAriel Elior /* Doorbell address sanity (address within doorbell bar range) */
10436907cd5SAriel Elior static bool qed_db_rec_sanity(struct qed_dev *cdev,
10536907cd5SAriel Elior 			      void __iomem *db_addr, void *db_data)
10636907cd5SAriel Elior {
10736907cd5SAriel Elior 	/* Make sure doorbell address is within the doorbell bar */
10836907cd5SAriel Elior 	if (db_addr < cdev->doorbells ||
10936907cd5SAriel Elior 	    (u8 __iomem *)db_addr >
11036907cd5SAriel Elior 	    (u8 __iomem *)cdev->doorbells + cdev->db_size) {
11136907cd5SAriel Elior 		WARN(true,
11236907cd5SAriel Elior 		     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
11336907cd5SAriel Elior 		     db_addr,
11436907cd5SAriel Elior 		     cdev->doorbells,
11536907cd5SAriel Elior 		     (u8 __iomem *)cdev->doorbells + cdev->db_size);
11636907cd5SAriel Elior 		return false;
11736907cd5SAriel Elior 	}
11836907cd5SAriel Elior 
11936907cd5SAriel Elior 	/* Make sure doorbell data pointer is not null */
12036907cd5SAriel Elior 	if (!db_data) {
12136907cd5SAriel Elior 		WARN(true, "Illegal doorbell data pointer: %p", db_data);
12236907cd5SAriel Elior 		return false;
12336907cd5SAriel Elior 	}
12436907cd5SAriel Elior 
12536907cd5SAriel Elior 	return true;
12636907cd5SAriel Elior }
12736907cd5SAriel Elior 
12836907cd5SAriel Elior /* Find hwfn according to the doorbell address */
12936907cd5SAriel Elior static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
13036907cd5SAriel Elior 					     void __iomem *db_addr)
13136907cd5SAriel Elior {
13236907cd5SAriel Elior 	struct qed_hwfn *p_hwfn;
13336907cd5SAriel Elior 
13436907cd5SAriel Elior 	/* In CMT the doorbell bar is split down the middle between engine 0 and engine 1 */
13536907cd5SAriel Elior 	if (cdev->num_hwfns > 1)
13636907cd5SAriel Elior 		p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
13736907cd5SAriel Elior 		    &cdev->hwfns[0] : &cdev->hwfns[1];
13836907cd5SAriel Elior 	else
13936907cd5SAriel Elior 		p_hwfn = QED_LEADING_HWFN(cdev);
14036907cd5SAriel Elior 
14136907cd5SAriel Elior 	return p_hwfn;
14236907cd5SAriel Elior }
14336907cd5SAriel Elior 
14436907cd5SAriel Elior /* Add a new entry to the doorbell recovery mechanism */
14536907cd5SAriel Elior int qed_db_recovery_add(struct qed_dev *cdev,
14636907cd5SAriel Elior 			void __iomem *db_addr,
14736907cd5SAriel Elior 			void *db_data,
14836907cd5SAriel Elior 			enum qed_db_rec_width db_width,
14936907cd5SAriel Elior 			enum qed_db_rec_space db_space)
15036907cd5SAriel Elior {
15136907cd5SAriel Elior 	struct qed_db_recovery_entry *db_entry;
15236907cd5SAriel Elior 	struct qed_hwfn *p_hwfn;
15336907cd5SAriel Elior 
15436907cd5SAriel Elior 	/* Short-circuit VFs, for now */
15536907cd5SAriel Elior 	if (IS_VF(cdev)) {
15636907cd5SAriel Elior 		DP_VERBOSE(cdev,
15736907cd5SAriel Elior 			   QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
15836907cd5SAriel Elior 		return 0;
15936907cd5SAriel Elior 	}
16036907cd5SAriel Elior 
16136907cd5SAriel Elior 	/* Sanitize doorbell address */
16236907cd5SAriel Elior 	if (!qed_db_rec_sanity(cdev, db_addr, db_data))
16336907cd5SAriel Elior 		return -EINVAL;
16436907cd5SAriel Elior 
16536907cd5SAriel Elior 	/* Obtain hwfn from doorbell address */
16636907cd5SAriel Elior 	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
16736907cd5SAriel Elior 
16836907cd5SAriel Elior 	/* Create entry */
16936907cd5SAriel Elior 	db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
17036907cd5SAriel Elior 	if (!db_entry) {
17136907cd5SAriel Elior 		DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
17236907cd5SAriel Elior 		return -ENOMEM;
17336907cd5SAriel Elior 	}
17436907cd5SAriel Elior 
17536907cd5SAriel Elior 	/* Populate entry */
17636907cd5SAriel Elior 	db_entry->db_addr = db_addr;
17736907cd5SAriel Elior 	db_entry->db_data = db_data;
17836907cd5SAriel Elior 	db_entry->db_width = db_width;
17936907cd5SAriel Elior 	db_entry->db_space = db_space;
18036907cd5SAriel Elior 	db_entry->hwfn_idx = p_hwfn->my_id;
18136907cd5SAriel Elior 
18236907cd5SAriel Elior 	/* Display */
18336907cd5SAriel Elior 	qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
18436907cd5SAriel Elior 
18536907cd5SAriel Elior 	/* Protect the list */
18636907cd5SAriel Elior 	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
18736907cd5SAriel Elior 	list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
18836907cd5SAriel Elior 	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
18936907cd5SAriel Elior 
19036907cd5SAriel Elior 	return 0;
19136907cd5SAriel Elior }
19236907cd5SAriel Elior 
19336907cd5SAriel Elior /* Remove an entry from the doorbell recovery mechanism */
19436907cd5SAriel Elior int qed_db_recovery_del(struct qed_dev *cdev,
19536907cd5SAriel Elior 			void __iomem *db_addr, void *db_data)
19636907cd5SAriel Elior {
19736907cd5SAriel Elior 	struct qed_db_recovery_entry *db_entry = NULL;
19836907cd5SAriel Elior 	struct qed_hwfn *p_hwfn;
19936907cd5SAriel Elior 	int rc = -EINVAL;
20036907cd5SAriel Elior 
20136907cd5SAriel Elior 	/* Short-circuit VFs, for now */
20236907cd5SAriel Elior 	if (IS_VF(cdev)) {
20336907cd5SAriel Elior 		DP_VERBOSE(cdev,
20436907cd5SAriel Elior 			   QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
20536907cd5SAriel Elior 		return 0;
20636907cd5SAriel Elior 	}
20736907cd5SAriel Elior 
20836907cd5SAriel Elior 	/* Sanitize doorbell address */
20936907cd5SAriel Elior 	if (!qed_db_rec_sanity(cdev, db_addr, db_data))
21036907cd5SAriel Elior 		return -EINVAL;
21136907cd5SAriel Elior 
21236907cd5SAriel Elior 	/* Obtain hwfn from doorbell address */
21336907cd5SAriel Elior 	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
21436907cd5SAriel Elior 
21536907cd5SAriel Elior 	/* Protect the list */
21636907cd5SAriel Elior 	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
21736907cd5SAriel Elior 	list_for_each_entry(db_entry,
21836907cd5SAriel Elior 			    &p_hwfn->db_recovery_info.list, list_entry) {
21936907cd5SAriel Elior 		/* search according to db_data addr since db_addr is not unique (roce) */
22036907cd5SAriel Elior 		if (db_entry->db_data == db_data) {
22136907cd5SAriel Elior 			qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
22236907cd5SAriel Elior 			list_del(&db_entry->list_entry);
22336907cd5SAriel Elior 			rc = 0;
22436907cd5SAriel Elior 			break;
22536907cd5SAriel Elior 		}
22636907cd5SAriel Elior 	}
22736907cd5SAriel Elior 
22836907cd5SAriel Elior 	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
22936907cd5SAriel Elior 
23036907cd5SAriel Elior 	if (rc == -EINVAL)
23236907cd5SAriel Elior 		DP_NOTICE(p_hwfn,
23336907cd5SAriel Elior 			  "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
23436907cd5SAriel Elior 			  db_data, db_addr);
23536907cd5SAriel Elior 	else
23636907cd5SAriel Elior 		kfree(db_entry);
23736907cd5SAriel Elior 
23836907cd5SAriel Elior 	return rc;
23936907cd5SAriel Elior }
24036907cd5SAriel Elior 
24136907cd5SAriel Elior /* Initialize the doorbell recovery mechanism */
24236907cd5SAriel Elior static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
24336907cd5SAriel Elior {
24436907cd5SAriel Elior 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");
24536907cd5SAriel Elior 
24636907cd5SAriel Elior 	/* Make sure db_size was set in cdev */
24736907cd5SAriel Elior 	if (!p_hwfn->cdev->db_size) {
24836907cd5SAriel Elior 		DP_ERR(p_hwfn->cdev, "db_size not set\n");
24936907cd5SAriel Elior 		return -EINVAL;
25036907cd5SAriel Elior 	}
25136907cd5SAriel Elior 
25236907cd5SAriel Elior 	INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
25336907cd5SAriel Elior 	spin_lock_init(&p_hwfn->db_recovery_info.lock);
25436907cd5SAriel Elior 	p_hwfn->db_recovery_info.db_recovery_counter = 0;
25536907cd5SAriel Elior 
25636907cd5SAriel Elior 	return 0;
25736907cd5SAriel Elior }
25836907cd5SAriel Elior 
25936907cd5SAriel Elior /* Destroy the doorbell recovery mechanism */
26036907cd5SAriel Elior static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
26136907cd5SAriel Elior {
26236907cd5SAriel Elior 	struct qed_db_recovery_entry *db_entry = NULL;
26336907cd5SAriel Elior 
26436907cd5SAriel Elior 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
26536907cd5SAriel Elior 	if (!list_empty(&p_hwfn->db_recovery_info.list)) {
26636907cd5SAriel Elior 		DP_VERBOSE(p_hwfn,
26736907cd5SAriel Elior 			   QED_MSG_SPQ,
26836907cd5SAriel Elior 			   "Doorbell recovery teardown found a non-empty doorbell recovery list. This is expected in a disorderly driver unload (e.g. recovery); otherwise some flow probably forgot to call qed_db_recovery_del(). Purging the doorbell recovery list...\n");
26936907cd5SAriel Elior 		while (!list_empty(&p_hwfn->db_recovery_info.list)) {
27036907cd5SAriel Elior 			db_entry =
27136907cd5SAriel Elior 			    list_first_entry(&p_hwfn->db_recovery_info.list,
27236907cd5SAriel Elior 					     struct qed_db_recovery_entry,
27336907cd5SAriel Elior 					     list_entry);
27436907cd5SAriel Elior 			qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
27536907cd5SAriel Elior 			list_del(&db_entry->list_entry);
27636907cd5SAriel Elior 			kfree(db_entry);
27736907cd5SAriel Elior 		}
27836907cd5SAriel Elior 	}
27936907cd5SAriel Elior 	p_hwfn->db_recovery_info.db_recovery_counter = 0;
28036907cd5SAriel Elior }
28136907cd5SAriel Elior 
28236907cd5SAriel Elior /* Print the content of the doorbell recovery mechanism */
28336907cd5SAriel Elior void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
28436907cd5SAriel Elior {
28536907cd5SAriel Elior 	struct qed_db_recovery_entry *db_entry = NULL;
28636907cd5SAriel Elior 
28736907cd5SAriel Elior 	DP_NOTICE(p_hwfn,
288d1ecf8a6SColin Ian King 		  "Displaying doorbell recovery database. Counter was %d\n",
28936907cd5SAriel Elior 		  p_hwfn->db_recovery_info.db_recovery_counter);
29036907cd5SAriel Elior 
29136907cd5SAriel Elior 	/* Protect the list */
29236907cd5SAriel Elior 	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
29336907cd5SAriel Elior 	list_for_each_entry(db_entry,
29436907cd5SAriel Elior 			    &p_hwfn->db_recovery_info.list, list_entry) {
29536907cd5SAriel Elior 		qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
29636907cd5SAriel Elior 	}
29736907cd5SAriel Elior 
29836907cd5SAriel Elior 	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
29936907cd5SAriel Elior }
30036907cd5SAriel Elior 
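/* Execution modes, as implemented in the two functions below: DB_REC_DRY_RUN
 * only logs which doorbells would have been rung, DB_REC_REAL_DEAL logs and
 * actually rings every registered doorbell, and DB_REC_ONCE silently rings a
 * single entry and stops without bumping the recovery counter. (Summary
 * derived from the code below.)
 */
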
30136907cd5SAriel Elior /* Ring the doorbell of a single doorbell recovery entry */
30236907cd5SAriel Elior static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
30336907cd5SAriel Elior 				 struct qed_db_recovery_entry *db_entry,
30436907cd5SAriel Elior 				 enum qed_db_rec_exec db_exec)
30536907cd5SAriel Elior {
30636907cd5SAriel Elior 	if (db_exec != DB_REC_ONCE) {
30736907cd5SAriel Elior 		/* Print according to width */
30836907cd5SAriel Elior 		if (db_entry->db_width == DB_REC_WIDTH_32B) {
30936907cd5SAriel Elior 			DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
31036907cd5SAriel Elior 				   "%s doorbell address %p data %x\n",
31136907cd5SAriel Elior 				   db_exec == DB_REC_DRY_RUN ?
31236907cd5SAriel Elior 				   "would have rung" : "ringing",
31336907cd5SAriel Elior 				   db_entry->db_addr,
31436907cd5SAriel Elior 				   *(u32 *)db_entry->db_data);
31536907cd5SAriel Elior 		} else {
31636907cd5SAriel Elior 			DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
31736907cd5SAriel Elior 				   "%s doorbell address %p data %llx\n",
31836907cd5SAriel Elior 				   db_exec == DB_REC_DRY_RUN ?
31936907cd5SAriel Elior 				   "would have rung" : "ringing",
32036907cd5SAriel Elior 				   db_entry->db_addr,
32136907cd5SAriel Elior 				   *(u64 *)(db_entry->db_data));
32236907cd5SAriel Elior 		}
32336907cd5SAriel Elior 	}
32436907cd5SAriel Elior 
32536907cd5SAriel Elior 	/* Sanity */
32636907cd5SAriel Elior 	if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
32736907cd5SAriel Elior 			       db_entry->db_data))
32836907cd5SAriel Elior 		return;
32936907cd5SAriel Elior 
33036907cd5SAriel Elior 	/* Flush the write combined buffer. Since there are multiple doorbelling
33136907cd5SAriel Elior 	 * entities using the same address, if we don't flush, a transaction
33236907cd5SAriel Elior 	 * could be lost.
33336907cd5SAriel Elior 	 */
33436907cd5SAriel Elior 	wmb();
33536907cd5SAriel Elior 
33636907cd5SAriel Elior 	/* Ring the doorbell */
33736907cd5SAriel Elior 	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
33836907cd5SAriel Elior 		if (db_entry->db_width == DB_REC_WIDTH_32B)
33936907cd5SAriel Elior 			DIRECT_REG_WR(db_entry->db_addr,
34036907cd5SAriel Elior 				      *(u32 *)(db_entry->db_data));
34136907cd5SAriel Elior 		else
34236907cd5SAriel Elior 			DIRECT_REG_WR64(db_entry->db_addr,
34336907cd5SAriel Elior 					*(u64 *)(db_entry->db_data));
34436907cd5SAriel Elior 	}
34536907cd5SAriel Elior 
34636907cd5SAriel Elior 	/* Flush the write combined buffer. Next doorbell may come from a
34736907cd5SAriel Elior 	 * different entity to the same address...
34836907cd5SAriel Elior 	 */
34936907cd5SAriel Elior 	wmb();
35036907cd5SAriel Elior }
35136907cd5SAriel Elior 
35236907cd5SAriel Elior /* Traverse the doorbell recovery entry list and ring all the doorbells */
35336907cd5SAriel Elior void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
35436907cd5SAriel Elior 			     enum qed_db_rec_exec db_exec)
35536907cd5SAriel Elior {
35636907cd5SAriel Elior 	struct qed_db_recovery_entry *db_entry = NULL;
35736907cd5SAriel Elior 
35836907cd5SAriel Elior 	if (db_exec != DB_REC_ONCE) {
35936907cd5SAriel Elior 		DP_NOTICE(p_hwfn,
36036907cd5SAriel Elior 			  "Executing doorbell recovery. Counter was %d\n",
36136907cd5SAriel Elior 			  p_hwfn->db_recovery_info.db_recovery_counter);
36236907cd5SAriel Elior 
36336907cd5SAriel Elior 		/* Track amount of times recovery was executed */
36436907cd5SAriel Elior 		p_hwfn->db_recovery_info.db_recovery_counter++;
36536907cd5SAriel Elior 	}
36636907cd5SAriel Elior 
36736907cd5SAriel Elior 	/* Protect the list */
36836907cd5SAriel Elior 	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
36936907cd5SAriel Elior 	list_for_each_entry(db_entry,
37036907cd5SAriel Elior 			    &p_hwfn->db_recovery_info.list, list_entry) {
37136907cd5SAriel Elior 		qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
37236907cd5SAriel Elior 		if (db_exec == DB_REC_ONCE)
37336907cd5SAriel Elior 			break;
37436907cd5SAriel Elior 	}
37536907cd5SAriel Elior 
37636907cd5SAriel Elior 	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
37736907cd5SAriel Elior }
37836907cd5SAriel Elior 
37936907cd5SAriel Elior /******************** Doorbell Recovery end ****************/
38036907cd5SAriel Elior 
38151ff1725SRam Amrani #define QED_MIN_DPIS            (4)
38251ff1725SRam Amrani #define QED_MIN_PWM_REGION      (QED_WID_SIZE * QED_MIN_DPIS)
38351ff1725SRam Amrani 
38415582962SRahul Verma static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
38515582962SRahul Verma 			   struct qed_ptt *p_ptt, enum BAR_ID bar_id)
386c2035eeaSRam Amrani {
387c2035eeaSRam Amrani 	u32 bar_reg = (bar_id == BAR_ID_0 ?
388c2035eeaSRam Amrani 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
3891408cc1fSYuval Mintz 	u32 val;
390c2035eeaSRam Amrani 
3911408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
3921a850bfcSMintz, Yuval 		return qed_vf_hw_bar_size(p_hwfn, bar_id);
3931408cc1fSYuval Mintz 
39415582962SRahul Verma 	val = qed_rd(p_hwfn, p_ptt, bar_reg);
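	/* Illustrative reading of the formula below (an assumption drawn only
	 * from this code, not from hardware documentation): the register
	 * encodes the BAR size as log2(size in 32kB units), so e.g. a value
	 * of 4 yields 1 << (4 + 15) = 512kB.
	 */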
395c2035eeaSRam Amrani 	if (val)
396c2035eeaSRam Amrani 		return 1 << (val + 15);
397c2035eeaSRam Amrani 
398c2035eeaSRam Amrani 	/* Old MFW initialized the above register only conditionally */
399c2035eeaSRam Amrani 	if (p_hwfn->cdev->num_hwfns > 1) {
400c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
401c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
402c2035eeaSRam Amrani 		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
403c2035eeaSRam Amrani 	} else {
404c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
405c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
406c2035eeaSRam Amrani 		return 512 * 1024;
407c2035eeaSRam Amrani 	}
408c2035eeaSRam Amrani }
409c2035eeaSRam Amrani 
4101a635e48SYuval Mintz void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
411fe56b9e6SYuval Mintz {
412fe56b9e6SYuval Mintz 	u32 i;
413fe56b9e6SYuval Mintz 
414fe56b9e6SYuval Mintz 	cdev->dp_level = dp_level;
415fe56b9e6SYuval Mintz 	cdev->dp_module = dp_module;
416fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
417fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
418fe56b9e6SYuval Mintz 
419fe56b9e6SYuval Mintz 		p_hwfn->dp_level = dp_level;
420fe56b9e6SYuval Mintz 		p_hwfn->dp_module = dp_module;
421fe56b9e6SYuval Mintz 	}
422fe56b9e6SYuval Mintz }
423fe56b9e6SYuval Mintz 
424fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
425fe56b9e6SYuval Mintz {
426fe56b9e6SYuval Mintz 	u8 i;
427fe56b9e6SYuval Mintz 
428fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
429fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
430fe56b9e6SYuval Mintz 
431fe56b9e6SYuval Mintz 		p_hwfn->cdev = cdev;
432fe56b9e6SYuval Mintz 		p_hwfn->my_id = i;
433fe56b9e6SYuval Mintz 		p_hwfn->b_active = false;
434fe56b9e6SYuval Mintz 
435fe56b9e6SYuval Mintz 		mutex_init(&p_hwfn->dmae_info.mutex);
436fe56b9e6SYuval Mintz 	}
437fe56b9e6SYuval Mintz 
438fe56b9e6SYuval Mintz 	/* hwfn 0 is always active */
439fe56b9e6SYuval Mintz 	cdev->hwfns[0].b_active = true;
440fe56b9e6SYuval Mintz 
441fe56b9e6SYuval Mintz 	/* set the default cache alignment to 128 */
442fe56b9e6SYuval Mintz 	cdev->cache_shift = 7;
443fe56b9e6SYuval Mintz }
444fe56b9e6SYuval Mintz 
445fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
446fe56b9e6SYuval Mintz {
447fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
448fe56b9e6SYuval Mintz 
449fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
450fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = NULL;
451fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
452fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = NULL;
453fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
454fe56b9e6SYuval Mintz 	qm_info->qm_port_params = NULL;
455bcd197c8SManish Chopra 	kfree(qm_info->wfq_data);
456bcd197c8SManish Chopra 	qm_info->wfq_data = NULL;
457fe56b9e6SYuval Mintz }
458fe56b9e6SYuval Mintz 
459a3f72307SDenis Bolotin static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
460a3f72307SDenis Bolotin {
461a3f72307SDenis Bolotin 	kfree(p_hwfn->dbg_user_info);
462a3f72307SDenis Bolotin 	p_hwfn->dbg_user_info = NULL;
463a3f72307SDenis Bolotin }
464a3f72307SDenis Bolotin 
465fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev)
466fe56b9e6SYuval Mintz {
467fe56b9e6SYuval Mintz 	int i;
468fe56b9e6SYuval Mintz 
4690db711bbSMintz, Yuval 	if (IS_VF(cdev)) {
4700db711bbSMintz, Yuval 		for_each_hwfn(cdev, i)
4710db711bbSMintz, Yuval 			qed_l2_free(&cdev->hwfns[i]);
4721408cc1fSYuval Mintz 		return;
4730db711bbSMintz, Yuval 	}
4741408cc1fSYuval Mintz 
475fe56b9e6SYuval Mintz 	kfree(cdev->fw_data);
476fe56b9e6SYuval Mintz 	cdev->fw_data = NULL;
477fe56b9e6SYuval Mintz 
478fe56b9e6SYuval Mintz 	kfree(cdev->reset_stats);
4793587cb87STomer Tayar 	cdev->reset_stats = NULL;
480fe56b9e6SYuval Mintz 
481fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
482fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
483fe56b9e6SYuval Mintz 
484fe56b9e6SYuval Mintz 		qed_cxt_mngr_free(p_hwfn);
485fe56b9e6SYuval Mintz 		qed_qm_info_free(p_hwfn);
486fe56b9e6SYuval Mintz 		qed_spq_free(p_hwfn);
4873587cb87STomer Tayar 		qed_eq_free(p_hwfn);
4883587cb87STomer Tayar 		qed_consq_free(p_hwfn);
489fe56b9e6SYuval Mintz 		qed_int_free(p_hwfn);
4900a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2
4913587cb87STomer Tayar 		qed_ll2_free(p_hwfn);
4920a7fb11cSYuval Mintz #endif
4931e128c81SArun Easi 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
4943587cb87STomer Tayar 			qed_fcoe_free(p_hwfn);
4951e128c81SArun Easi 
4961d6cff4fSYuval Mintz 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
4973587cb87STomer Tayar 			qed_iscsi_free(p_hwfn);
4983587cb87STomer Tayar 			qed_ooo_free(p_hwfn);
4991d6cff4fSYuval Mintz 		}
500291d57f6SMichal Kalderon 
501291d57f6SMichal Kalderon 		if (QED_IS_RDMA_PERSONALITY(p_hwfn))
502291d57f6SMichal Kalderon 			qed_rdma_info_free(p_hwfn);
503291d57f6SMichal Kalderon 
50432a47e72SYuval Mintz 		qed_iov_free(p_hwfn);
5050db711bbSMintz, Yuval 		qed_l2_free(p_hwfn);
506fe56b9e6SYuval Mintz 		qed_dmae_info_free(p_hwfn);
507270837b3Ssudarsana.kalluru@cavium.com 		qed_dcbx_info_free(p_hwfn);
508a3f72307SDenis Bolotin 		qed_dbg_user_data_free(p_hwfn);
50936907cd5SAriel Elior 
51036907cd5SAriel Elior 		/* Destroy doorbell recovery mechanism */
51136907cd5SAriel Elior 		qed_db_recovery_teardown(p_hwfn);
512fe56b9e6SYuval Mintz 	}
513fe56b9e6SYuval Mintz }
514fe56b9e6SYuval Mintz 
515b5a9ee7cSAriel Elior /******************** QM initialization *******************/
516b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP 0x9f
517b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP_4PORT_K2 0xf
518b5a9ee7cSAriel Elior 
519b5a9ee7cSAriel Elior /* determines the physical queue flags for a given PF. */
520b5a9ee7cSAriel Elior static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
521fe56b9e6SYuval Mintz {
522b5a9ee7cSAriel Elior 	u32 flags;
523fe56b9e6SYuval Mintz 
524b5a9ee7cSAriel Elior 	/* common flags */
525b5a9ee7cSAriel Elior 	flags = PQ_FLAGS_LB;
526fe56b9e6SYuval Mintz 
527b5a9ee7cSAriel Elior 	/* feature flags */
528b5a9ee7cSAriel Elior 	if (IS_QED_SRIOV(p_hwfn->cdev))
529b5a9ee7cSAriel Elior 		flags |= PQ_FLAGS_VFS;
530fe56b9e6SYuval Mintz 
531b5a9ee7cSAriel Elior 	/* protocol flags */
532b5a9ee7cSAriel Elior 	switch (p_hwfn->hw_info.personality) {
533b5a9ee7cSAriel Elior 	case QED_PCI_ETH:
534b5a9ee7cSAriel Elior 		flags |= PQ_FLAGS_MCOS;
535b5a9ee7cSAriel Elior 		break;
536b5a9ee7cSAriel Elior 	case QED_PCI_FCOE:
537b5a9ee7cSAriel Elior 		flags |= PQ_FLAGS_OFLD;
538b5a9ee7cSAriel Elior 		break;
539b5a9ee7cSAriel Elior 	case QED_PCI_ISCSI:
540b5a9ee7cSAriel Elior 		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
541b5a9ee7cSAriel Elior 		break;
542b5a9ee7cSAriel Elior 	case QED_PCI_ETH_ROCE:
543b5a9ee7cSAriel Elior 		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
54461be82b0SDenis Bolotin 		if (IS_QED_MULTI_TC_ROCE(p_hwfn))
54561be82b0SDenis Bolotin 			flags |= PQ_FLAGS_MTC;
546b5a9ee7cSAriel Elior 		break;
54793c45984SKalderon, Michal 	case QED_PCI_ETH_IWARP:
54893c45984SKalderon, Michal 		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
54993c45984SKalderon, Michal 		    PQ_FLAGS_OFLD;
55093c45984SKalderon, Michal 		break;
551b5a9ee7cSAriel Elior 	default:
552fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
553b5a9ee7cSAriel Elior 		       "unknown personality %d\n", p_hwfn->hw_info.personality);
554b5a9ee7cSAriel Elior 		return 0;
555fe56b9e6SYuval Mintz 	}
556fe56b9e6SYuval Mintz 
557b5a9ee7cSAriel Elior 	return flags;
558b5a9ee7cSAriel Elior }
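
/* For example (derived from the switch above): an iSCSI PF with SR-IOV
 * enabled ends up with PQ_FLAGS_LB | PQ_FLAGS_VFS | PQ_FLAGS_ACK |
 * PQ_FLAGS_OOO | PQ_FLAGS_OFLD.
 */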
559b5a9ee7cSAriel Elior 
560b5a9ee7cSAriel Elior /* Getters for resource amounts necessary for qm initialization */
561bf774d14SYueHaibing static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
562b5a9ee7cSAriel Elior {
563b5a9ee7cSAriel Elior 	return p_hwfn->hw_info.num_hw_tc;
564b5a9ee7cSAriel Elior }
565b5a9ee7cSAriel Elior 
566bf774d14SYueHaibing static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
567b5a9ee7cSAriel Elior {
568b5a9ee7cSAriel Elior 	return IS_QED_SRIOV(p_hwfn->cdev) ?
569b5a9ee7cSAriel Elior 	       p_hwfn->cdev->p_iov_info->total_vfs : 0;
570b5a9ee7cSAriel Elior }
571b5a9ee7cSAriel Elior 
57261be82b0SDenis Bolotin static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
57361be82b0SDenis Bolotin {
57461be82b0SDenis Bolotin 	u32 pq_flags = qed_get_pq_flags(p_hwfn);
57561be82b0SDenis Bolotin 
57661be82b0SDenis Bolotin 	if (!(PQ_FLAGS_MTC & pq_flags))
57761be82b0SDenis Bolotin 		return 1;
57861be82b0SDenis Bolotin 
57961be82b0SDenis Bolotin 	return qed_init_qm_get_num_tcs(p_hwfn);
58061be82b0SDenis Bolotin }
58161be82b0SDenis Bolotin 
582b5a9ee7cSAriel Elior #define NUM_DEFAULT_RLS 1
583b5a9ee7cSAriel Elior 
584bf774d14SYueHaibing static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
585b5a9ee7cSAriel Elior {
586b5a9ee7cSAriel Elior 	u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
587b5a9ee7cSAriel Elior 
588b5a9ee7cSAriel Elior 	/* num RLs can't exceed resource amount of rls or vports */
589b5a9ee7cSAriel Elior 	num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
590b5a9ee7cSAriel Elior 				 RESC_NUM(p_hwfn, QED_VPORT));
591b5a9ee7cSAriel Elior 
592b5a9ee7cSAriel Elior 	/* Make sure after we reserve there's something left */
593b5a9ee7cSAriel Elior 	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
594b5a9ee7cSAriel Elior 		return 0;
595b5a9ee7cSAriel Elior 
596b5a9ee7cSAriel Elior 	/* Subtract the RLs necessary for VFs and one default RL for the PF */
597b5a9ee7cSAriel Elior 	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
598b5a9ee7cSAriel Elior 
599b5a9ee7cSAriel Elior 	return num_pf_rls;
600b5a9ee7cSAriel Elior }
601b5a9ee7cSAriel Elior 
602bf774d14SYueHaibing static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
603b5a9ee7cSAriel Elior {
604b5a9ee7cSAriel Elior 	u32 pq_flags = qed_get_pq_flags(p_hwfn);
605b5a9ee7cSAriel Elior 
606b5a9ee7cSAriel Elior 	/* all pqs share the same vport, except for vfs and pf_rl pqs */
607b5a9ee7cSAriel Elior 	return (!!(PQ_FLAGS_RLS & pq_flags)) *
608b5a9ee7cSAriel Elior 	       qed_init_qm_get_num_pf_rls(p_hwfn) +
609b5a9ee7cSAriel Elior 	       (!!(PQ_FLAGS_VFS & pq_flags)) *
610b5a9ee7cSAriel Elior 	       qed_init_qm_get_num_vfs(p_hwfn) + 1;
611b5a9ee7cSAriel Elior }
612b5a9ee7cSAriel Elior 
613b5a9ee7cSAriel Elior /* calc amount of PQs according to the requested flags */
614bf774d14SYueHaibing static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
615b5a9ee7cSAriel Elior {
616b5a9ee7cSAriel Elior 	u32 pq_flags = qed_get_pq_flags(p_hwfn);
617b5a9ee7cSAriel Elior 
618b5a9ee7cSAriel Elior 	return (!!(PQ_FLAGS_RLS & pq_flags)) *
619b5a9ee7cSAriel Elior 	       qed_init_qm_get_num_pf_rls(p_hwfn) +
620b5a9ee7cSAriel Elior 	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
621b5a9ee7cSAriel Elior 	       qed_init_qm_get_num_tcs(p_hwfn) +
622b5a9ee7cSAriel Elior 	       (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
62361be82b0SDenis Bolotin 	       (!!(PQ_FLAGS_ACK & pq_flags)) +
62461be82b0SDenis Bolotin 	       (!!(PQ_FLAGS_OFLD & pq_flags)) *
62561be82b0SDenis Bolotin 	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
62661be82b0SDenis Bolotin 	       (!!(PQ_FLAGS_LLT & pq_flags)) *
62761be82b0SDenis Bolotin 	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
628b5a9ee7cSAriel Elior 	       (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
629b5a9ee7cSAriel Elior }
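
/* Worked example (illustrative only, using the terms above): a RoCE PF with
 * four hardware TCs, multi-TC RoCE enabled and no VFs has pq_flags =
 * PQ_FLAGS_LB | PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT | PQ_FLAGS_MTC,
 * so the sum above yields 4 (MCOS) + 1 (LB) + 4 (OFLD) + 4 (LLT) = 13 PQs.
 */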
630b5a9ee7cSAriel Elior 
631b5a9ee7cSAriel Elior /* initialize the top level QM params */
632b5a9ee7cSAriel Elior static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
633b5a9ee7cSAriel Elior {
634b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
635b5a9ee7cSAriel Elior 	bool four_port;
636b5a9ee7cSAriel Elior 
637b5a9ee7cSAriel Elior 	/* pq and vport bases for this PF */
638b5a9ee7cSAriel Elior 	qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
639b5a9ee7cSAriel Elior 	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
640b5a9ee7cSAriel Elior 
641b5a9ee7cSAriel Elior 	/* rate limiting and weighted fair queueing are always enabled */
642c7281d59SGustavo A. R. Silva 	qm_info->vport_rl_en = true;
643c7281d59SGustavo A. R. Silva 	qm_info->vport_wfq_en = true;
644b5a9ee7cSAriel Elior 
645b5a9ee7cSAriel Elior 	/* TC config is different for AH 4 port */
64678cea9ffSTomer Tayar 	four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
647b5a9ee7cSAriel Elior 
648b5a9ee7cSAriel Elior 	/* in AH 4 port we have fewer TCs per port */
649b5a9ee7cSAriel Elior 	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
650b5a9ee7cSAriel Elior 						     NUM_OF_PHYS_TCS;
651b5a9ee7cSAriel Elior 
652b5a9ee7cSAriel Elior 	/* unless MFW indicated otherwise, ooo_tc == 3 for
653b5a9ee7cSAriel Elior 	 * AH 4-port and 4 otherwise.
654fe56b9e6SYuval Mintz 	 */
655b5a9ee7cSAriel Elior 	if (!qm_info->ooo_tc)
656b5a9ee7cSAriel Elior 		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
657b5a9ee7cSAriel Elior 					      DCBX_TCP_OOO_TC;
658dbb799c3SYuval Mintz }
659dbb799c3SYuval Mintz 
660b5a9ee7cSAriel Elior /* initialize qm vport params */
661b5a9ee7cSAriel Elior static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
662b5a9ee7cSAriel Elior {
663b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
664b5a9ee7cSAriel Elior 	u8 i;
665fe56b9e6SYuval Mintz 
666b5a9ee7cSAriel Elior 	/* all vports participate in weighted fair queueing */
667b5a9ee7cSAriel Elior 	for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
668b5a9ee7cSAriel Elior 		qm_info->qm_vport_params[i].vport_wfq = 1;
669fe56b9e6SYuval Mintz }
670fe56b9e6SYuval Mintz 
671b5a9ee7cSAriel Elior /* initialize qm port params */
672b5a9ee7cSAriel Elior static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
673b5a9ee7cSAriel Elior {
674fe56b9e6SYuval Mintz 	/* Initialize qm port parameters */
67578cea9ffSTomer Tayar 	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
676b5a9ee7cSAriel Elior 
677b5a9ee7cSAriel Elior 	/* indicate how ooo and high pri traffic is dealt with */
678b5a9ee7cSAriel Elior 	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
679b5a9ee7cSAriel Elior 			  ACTIVE_TCS_BMAP_4PORT_K2 :
680b5a9ee7cSAriel Elior 			  ACTIVE_TCS_BMAP;
681b5a9ee7cSAriel Elior 
682fe56b9e6SYuval Mintz 	for (i = 0; i < num_ports; i++) {
683b5a9ee7cSAriel Elior 		struct init_qm_port_params *p_qm_port =
684b5a9ee7cSAriel Elior 		    &p_hwfn->qm_info.qm_port_params[i];
685b5a9ee7cSAriel Elior 
686fe56b9e6SYuval Mintz 		p_qm_port->active = 1;
687b5a9ee7cSAriel Elior 		p_qm_port->active_phys_tcs = active_phys_tcs;
688fe56b9e6SYuval Mintz 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
689fe56b9e6SYuval Mintz 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
690fe56b9e6SYuval Mintz 	}
691b5a9ee7cSAriel Elior }
692fe56b9e6SYuval Mintz 
693b5a9ee7cSAriel Elior /* Reset the params which must be reset for qm init. QM init may be called as
694b5a9ee7cSAriel Elior  * a result of flows other than driver load (e.g. dcbx renegotiation). Other
695b5a9ee7cSAriel Elior  * params may be affected by the init but would simply recalculate to the same
696b5a9ee7cSAriel Elior  * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
697b5a9ee7cSAriel Elior  * affected as these amounts stay the same.
698b5a9ee7cSAriel Elior  */
699b5a9ee7cSAriel Elior static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
700b5a9ee7cSAriel Elior {
701b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
702fe56b9e6SYuval Mintz 
703b5a9ee7cSAriel Elior 	qm_info->num_pqs = 0;
704b5a9ee7cSAriel Elior 	qm_info->num_vports = 0;
705b5a9ee7cSAriel Elior 	qm_info->num_pf_rls = 0;
706b5a9ee7cSAriel Elior 	qm_info->num_vf_pqs = 0;
707b5a9ee7cSAriel Elior 	qm_info->first_vf_pq = 0;
708b5a9ee7cSAriel Elior 	qm_info->first_mcos_pq = 0;
709b5a9ee7cSAriel Elior 	qm_info->first_rl_pq = 0;
710b5a9ee7cSAriel Elior }
711fe56b9e6SYuval Mintz 
712b5a9ee7cSAriel Elior static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
713b5a9ee7cSAriel Elior {
714b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
715b5a9ee7cSAriel Elior 
716b5a9ee7cSAriel Elior 	qm_info->num_vports++;
717b5a9ee7cSAriel Elior 
718b5a9ee7cSAriel Elior 	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
719b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn,
720b5a9ee7cSAriel Elior 		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
721b5a9ee7cSAriel Elior 		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
722b5a9ee7cSAriel Elior }
723b5a9ee7cSAriel Elior 
724b5a9ee7cSAriel Elior /* initialize a single pq and manage qm_info resources accounting.
725b5a9ee7cSAriel Elior  * The pq_init_flags param determines whether the PQ is rate limited
726b5a9ee7cSAriel Elior  * (for VF or PF) and whether a new vport is allocated to the pq or not
727b5a9ee7cSAriel Elior  * (i.e. vport will be shared).
728b5a9ee7cSAriel Elior  */
729b5a9ee7cSAriel Elior 
730b5a9ee7cSAriel Elior /* flags for pq init */
731b5a9ee7cSAriel Elior #define PQ_INIT_SHARE_VPORT     (1 << 0)
732b5a9ee7cSAriel Elior #define PQ_INIT_PF_RL           (1 << 1)
733b5a9ee7cSAriel Elior #define PQ_INIT_VF_RL           (1 << 2)
734b5a9ee7cSAriel Elior 
735b5a9ee7cSAriel Elior /* defines for pq init */
736b5a9ee7cSAriel Elior #define PQ_INIT_DEFAULT_WRR_GROUP       1
737b5a9ee7cSAriel Elior #define PQ_INIT_DEFAULT_TC              0
738c4259ddaSDenis Bolotin 
739c4259ddaSDenis Bolotin void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
740c4259ddaSDenis Bolotin {
741c4259ddaSDenis Bolotin 	p_info->offload_tc = tc;
742c4259ddaSDenis Bolotin 	p_info->offload_tc_set = true;
743c4259ddaSDenis Bolotin }
744c4259ddaSDenis Bolotin 
745c4259ddaSDenis Bolotin static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
746c4259ddaSDenis Bolotin {
747c4259ddaSDenis Bolotin 	return p_hwfn->hw_info.offload_tc_set;
748c4259ddaSDenis Bolotin }
749c4259ddaSDenis Bolotin 
750c4259ddaSDenis Bolotin static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
751c4259ddaSDenis Bolotin {
752c4259ddaSDenis Bolotin 	if (qed_is_offload_tc_set(p_hwfn))
753c4259ddaSDenis Bolotin 		return p_hwfn->hw_info.offload_tc;
754c4259ddaSDenis Bolotin 
755c4259ddaSDenis Bolotin 	return PQ_INIT_DEFAULT_TC;
756c4259ddaSDenis Bolotin }
757b5a9ee7cSAriel Elior 
758b5a9ee7cSAriel Elior static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
759b5a9ee7cSAriel Elior 			   struct qed_qm_info *qm_info,
760b5a9ee7cSAriel Elior 			   u8 tc, u32 pq_init_flags)
761b5a9ee7cSAriel Elior {
762b5a9ee7cSAriel Elior 	u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
763b5a9ee7cSAriel Elior 
764b5a9ee7cSAriel Elior 	if (pq_idx > max_pq)
765b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn,
766b5a9ee7cSAriel Elior 		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
767b5a9ee7cSAriel Elior 
768b5a9ee7cSAriel Elior 	/* init pq params */
76950bc60cbSMichal Kalderon 	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
770b5a9ee7cSAriel Elior 	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
771b5a9ee7cSAriel Elior 	    qm_info->num_vports;
772b5a9ee7cSAriel Elior 	qm_info->qm_pq_params[pq_idx].tc_id = tc;
773b5a9ee7cSAriel Elior 	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
774b5a9ee7cSAriel Elior 	qm_info->qm_pq_params[pq_idx].rl_valid =
775b5a9ee7cSAriel Elior 	    (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
776b5a9ee7cSAriel Elior 
777b5a9ee7cSAriel Elior 	/* qm params accounting */
778b5a9ee7cSAriel Elior 	qm_info->num_pqs++;
779b5a9ee7cSAriel Elior 	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
780b5a9ee7cSAriel Elior 		qm_info->num_vports++;
781b5a9ee7cSAriel Elior 
782b5a9ee7cSAriel Elior 	if (pq_init_flags & PQ_INIT_PF_RL)
783b5a9ee7cSAriel Elior 		qm_info->num_pf_rls++;
784b5a9ee7cSAriel Elior 
785b5a9ee7cSAriel Elior 	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
786b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn,
787b5a9ee7cSAriel Elior 		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
788b5a9ee7cSAriel Elior 		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
789b5a9ee7cSAriel Elior 
790b5a9ee7cSAriel Elior 	if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
791b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn,
792b5a9ee7cSAriel Elior 		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
793b5a9ee7cSAriel Elior 		       qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
794b5a9ee7cSAriel Elior }
795b5a9ee7cSAriel Elior 
796b5a9ee7cSAriel Elior /* get pq index according to PQ_FLAGS */
797b5a9ee7cSAriel Elior static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
798ffb057f9SManish Chopra 					   unsigned long pq_flags)
799b5a9ee7cSAriel Elior {
800b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
801b5a9ee7cSAriel Elior 
802b5a9ee7cSAriel Elior 	/* Can't have multiple flags set here */
803ffb057f9SManish Chopra 	if (bitmap_weight(&pq_flags,
804276d43f0SDenis Bolotin 			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
805ffb057f9SManish Chopra 		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
806b5a9ee7cSAriel Elior 		goto err;
807276d43f0SDenis Bolotin 	}
808b5a9ee7cSAriel Elior 
809eb62cca9SDenis Bolotin 	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
810ffb057f9SManish Chopra 		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
811eb62cca9SDenis Bolotin 		goto err;
812eb62cca9SDenis Bolotin 	}
813eb62cca9SDenis Bolotin 
814b5a9ee7cSAriel Elior 	switch (pq_flags) {
815b5a9ee7cSAriel Elior 	case PQ_FLAGS_RLS:
816b5a9ee7cSAriel Elior 		return &qm_info->first_rl_pq;
817b5a9ee7cSAriel Elior 	case PQ_FLAGS_MCOS:
818b5a9ee7cSAriel Elior 		return &qm_info->first_mcos_pq;
819b5a9ee7cSAriel Elior 	case PQ_FLAGS_LB:
820b5a9ee7cSAriel Elior 		return &qm_info->pure_lb_pq;
821b5a9ee7cSAriel Elior 	case PQ_FLAGS_OOO:
822b5a9ee7cSAriel Elior 		return &qm_info->ooo_pq;
823b5a9ee7cSAriel Elior 	case PQ_FLAGS_ACK:
824b5a9ee7cSAriel Elior 		return &qm_info->pure_ack_pq;
825b5a9ee7cSAriel Elior 	case PQ_FLAGS_OFLD:
82661be82b0SDenis Bolotin 		return &qm_info->first_ofld_pq;
827b5a9ee7cSAriel Elior 	case PQ_FLAGS_LLT:
82861be82b0SDenis Bolotin 		return &qm_info->first_llt_pq;
829b5a9ee7cSAriel Elior 	case PQ_FLAGS_VFS:
830b5a9ee7cSAriel Elior 		return &qm_info->first_vf_pq;
831b5a9ee7cSAriel Elior 	default:
832b5a9ee7cSAriel Elior 		goto err;
833b5a9ee7cSAriel Elior 	}
834b5a9ee7cSAriel Elior 
835b5a9ee7cSAriel Elior err:
836eb62cca9SDenis Bolotin 	return &qm_info->start_pq;
837b5a9ee7cSAriel Elior }
838b5a9ee7cSAriel Elior 
839b5a9ee7cSAriel Elior /* save pq index in qm info */
840b5a9ee7cSAriel Elior static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
841b5a9ee7cSAriel Elior 				u32 pq_flags, u16 pq_val)
842b5a9ee7cSAriel Elior {
843b5a9ee7cSAriel Elior 	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
844b5a9ee7cSAriel Elior 
845b5a9ee7cSAriel Elior 	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
846b5a9ee7cSAriel Elior }
847b5a9ee7cSAriel Elior 
848b5a9ee7cSAriel Elior /* get tx pq index, with the PQ TX base already set (ready for context init) */
849b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
850b5a9ee7cSAriel Elior {
851b5a9ee7cSAriel Elior 	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
852b5a9ee7cSAriel Elior 
853b5a9ee7cSAriel Elior 	return *base_pq_idx + CM_TX_PQ_BASE;
854b5a9ee7cSAriel Elior }
855b5a9ee7cSAriel Elior 
856b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
857b5a9ee7cSAriel Elior {
858b5a9ee7cSAriel Elior 	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
859b5a9ee7cSAriel Elior 
860eb62cca9SDenis Bolotin 	if (max_tc == 0) {
861eb62cca9SDenis Bolotin 		DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
862eb62cca9SDenis Bolotin 		       PQ_FLAGS_MCOS);
863eb62cca9SDenis Bolotin 		return p_hwfn->qm_info.start_pq;
864eb62cca9SDenis Bolotin 	}
865eb62cca9SDenis Bolotin 
866b5a9ee7cSAriel Elior 	if (tc > max_tc)
867b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
868b5a9ee7cSAriel Elior 
869eb62cca9SDenis Bolotin 	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
870b5a9ee7cSAriel Elior }
871b5a9ee7cSAriel Elior 
872b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
873b5a9ee7cSAriel Elior {
874b5a9ee7cSAriel Elior 	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
875b5a9ee7cSAriel Elior 
876eb62cca9SDenis Bolotin 	if (max_vf == 0) {
877eb62cca9SDenis Bolotin 		DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
878eb62cca9SDenis Bolotin 		       PQ_FLAGS_VFS);
879eb62cca9SDenis Bolotin 		return p_hwfn->qm_info.start_pq;
880eb62cca9SDenis Bolotin 	}
881eb62cca9SDenis Bolotin 
882b5a9ee7cSAriel Elior 	if (vf > max_vf)
883b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
884b5a9ee7cSAriel Elior 
885eb62cca9SDenis Bolotin 	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
886b5a9ee7cSAriel Elior }
887b5a9ee7cSAriel Elior 
88861be82b0SDenis Bolotin u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
88961be82b0SDenis Bolotin {
89061be82b0SDenis Bolotin 	u16 first_ofld_pq, pq_offset;
89161be82b0SDenis Bolotin 
89261be82b0SDenis Bolotin 	first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
89361be82b0SDenis Bolotin 	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
89461be82b0SDenis Bolotin 		    tc : PQ_INIT_DEFAULT_TC;
89561be82b0SDenis Bolotin 
89661be82b0SDenis Bolotin 	return first_ofld_pq + pq_offset;
89761be82b0SDenis Bolotin }
89861be82b0SDenis Bolotin 
89961be82b0SDenis Bolotin u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
90061be82b0SDenis Bolotin {
90161be82b0SDenis Bolotin 	u16 first_llt_pq, pq_offset;
90261be82b0SDenis Bolotin 
90361be82b0SDenis Bolotin 	first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
90461be82b0SDenis Bolotin 	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
90561be82b0SDenis Bolotin 		    tc : PQ_INIT_DEFAULT_TC;
90661be82b0SDenis Bolotin 
90761be82b0SDenis Bolotin 	return first_llt_pq + pq_offset;
90861be82b0SDenis Bolotin }
90961be82b0SDenis Bolotin 
910b5a9ee7cSAriel Elior /* Functions for creating specific types of pqs */
911b5a9ee7cSAriel Elior static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
912b5a9ee7cSAriel Elior {
913b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
914b5a9ee7cSAriel Elior 
915b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
916b5a9ee7cSAriel Elior 		return;
917b5a9ee7cSAriel Elior 
918b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
919b5a9ee7cSAriel Elior 	qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
920b5a9ee7cSAriel Elior }
921b5a9ee7cSAriel Elior 
922b5a9ee7cSAriel Elior static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
923b5a9ee7cSAriel Elior {
924b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
925b5a9ee7cSAriel Elior 
926b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
927b5a9ee7cSAriel Elior 		return;
928b5a9ee7cSAriel Elior 
929b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
930b5a9ee7cSAriel Elior 	qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
931b5a9ee7cSAriel Elior }
932b5a9ee7cSAriel Elior 
933b5a9ee7cSAriel Elior static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
934b5a9ee7cSAriel Elior {
935b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
936b5a9ee7cSAriel Elior 
937b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
938b5a9ee7cSAriel Elior 		return;
939b5a9ee7cSAriel Elior 
940b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
941c4259ddaSDenis Bolotin 	qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
942c4259ddaSDenis Bolotin 		       PQ_INIT_SHARE_VPORT);
943b5a9ee7cSAriel Elior }
944b5a9ee7cSAriel Elior 
94561be82b0SDenis Bolotin static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
94661be82b0SDenis Bolotin {
94761be82b0SDenis Bolotin 	u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
94861be82b0SDenis Bolotin 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
94961be82b0SDenis Bolotin 	u8 tc;
95061be82b0SDenis Bolotin 
95161be82b0SDenis Bolotin 	/* override pq's TC if offload TC is set */
95261be82b0SDenis Bolotin 	for (tc = 0; tc < num_tcs; tc++)
95361be82b0SDenis Bolotin 		qed_init_qm_pq(p_hwfn, qm_info,
95461be82b0SDenis Bolotin 			       qed_is_offload_tc_set(p_hwfn) ?
95561be82b0SDenis Bolotin 			       p_hwfn->hw_info.offload_tc : tc,
95661be82b0SDenis Bolotin 			       PQ_INIT_SHARE_VPORT);
95761be82b0SDenis Bolotin }
95861be82b0SDenis Bolotin 
959b5a9ee7cSAriel Elior static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
960b5a9ee7cSAriel Elior {
961b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
962b5a9ee7cSAriel Elior 
963b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
964b5a9ee7cSAriel Elior 		return;
965b5a9ee7cSAriel Elior 
966b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
96761be82b0SDenis Bolotin 	qed_init_qm_mtc_pqs(p_hwfn);
968b5a9ee7cSAriel Elior }
969b5a9ee7cSAriel Elior 
970b5a9ee7cSAriel Elior static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
971b5a9ee7cSAriel Elior {
972b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
973b5a9ee7cSAriel Elior 
974b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
975b5a9ee7cSAriel Elior 		return;
976b5a9ee7cSAriel Elior 
977b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
97861be82b0SDenis Bolotin 	qed_init_qm_mtc_pqs(p_hwfn);
979b5a9ee7cSAriel Elior }
980b5a9ee7cSAriel Elior 
981b5a9ee7cSAriel Elior static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
982b5a9ee7cSAriel Elior {
983b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
984b5a9ee7cSAriel Elior 	u8 tc_idx;
985b5a9ee7cSAriel Elior 
986b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
987b5a9ee7cSAriel Elior 		return;
988b5a9ee7cSAriel Elior 
989b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
990b5a9ee7cSAriel Elior 	for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
991b5a9ee7cSAriel Elior 		qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
992b5a9ee7cSAriel Elior }
993b5a9ee7cSAriel Elior 
994b5a9ee7cSAriel Elior static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
995b5a9ee7cSAriel Elior {
996b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
997b5a9ee7cSAriel Elior 	u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
998b5a9ee7cSAriel Elior 
999b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
1000b5a9ee7cSAriel Elior 		return;
1001b5a9ee7cSAriel Elior 
1002b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
10031408cc1fSYuval Mintz 	qm_info->num_vf_pqs = num_vfs;
1004b5a9ee7cSAriel Elior 	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
1005b5a9ee7cSAriel Elior 		qed_init_qm_pq(p_hwfn,
1006b5a9ee7cSAriel Elior 			       qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
1007b5a9ee7cSAriel Elior }
1008fe56b9e6SYuval Mintz 
1009b5a9ee7cSAriel Elior static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
1010b5a9ee7cSAriel Elior {
1011b5a9ee7cSAriel Elior 	u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
1012b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1013a64b02d5SManish Chopra 
1014b5a9ee7cSAriel Elior 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
1015b5a9ee7cSAriel Elior 		return;
1016b5a9ee7cSAriel Elior 
1017b5a9ee7cSAriel Elior 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
1018b5a9ee7cSAriel Elior 	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
1019c4259ddaSDenis Bolotin 		qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
1020c4259ddaSDenis Bolotin 			       PQ_INIT_PF_RL);
1021b5a9ee7cSAriel Elior }
1022b5a9ee7cSAriel Elior 
1023b5a9ee7cSAriel Elior static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
1024b5a9ee7cSAriel Elior {
1025b5a9ee7cSAriel Elior 	/* rate limited pqs, must come first (FW assumption) */
1026b5a9ee7cSAriel Elior 	qed_init_qm_rl_pqs(p_hwfn);
1027b5a9ee7cSAriel Elior 
1028b5a9ee7cSAriel Elior 	/* pqs for multi cos */
1029b5a9ee7cSAriel Elior 	qed_init_qm_mcos_pqs(p_hwfn);
1030b5a9ee7cSAriel Elior 
1031b5a9ee7cSAriel Elior 	/* pure loopback pq */
1032b5a9ee7cSAriel Elior 	qed_init_qm_lb_pq(p_hwfn);
1033b5a9ee7cSAriel Elior 
1034b5a9ee7cSAriel Elior 	/* out of order pq */
1035b5a9ee7cSAriel Elior 	qed_init_qm_ooo_pq(p_hwfn);
1036b5a9ee7cSAriel Elior 
1037b5a9ee7cSAriel Elior 	/* pure ack pq */
1038b5a9ee7cSAriel Elior 	qed_init_qm_pure_ack_pq(p_hwfn);
1039b5a9ee7cSAriel Elior 
1040b5a9ee7cSAriel Elior 	/* pq for offloaded protocol */
1041b5a9ee7cSAriel Elior 	qed_init_qm_offload_pq(p_hwfn);
1042b5a9ee7cSAriel Elior 
1043b5a9ee7cSAriel Elior 	/* low latency pq */
1044b5a9ee7cSAriel Elior 	qed_init_qm_low_latency_pq(p_hwfn);
1045b5a9ee7cSAriel Elior 
1046b5a9ee7cSAriel Elior 	/* done sharing vports */
1047b5a9ee7cSAriel Elior 	qed_init_qm_advance_vport(p_hwfn);
1048b5a9ee7cSAriel Elior 
1049b5a9ee7cSAriel Elior 	/* pqs for vfs */
1050b5a9ee7cSAriel Elior 	qed_init_qm_vf_pqs(p_hwfn);
1051b5a9ee7cSAriel Elior }
1052b5a9ee7cSAriel Elior 
1053b5a9ee7cSAriel Elior /* compare values of getters against resources amounts */
1054b5a9ee7cSAriel Elior static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
1055b5a9ee7cSAriel Elior {
1056b5a9ee7cSAriel Elior 	if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
1057b5a9ee7cSAriel Elior 		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
1058b5a9ee7cSAriel Elior 		return -EINVAL;
1059b5a9ee7cSAriel Elior 	}
1060b5a9ee7cSAriel Elior 
106161be82b0SDenis Bolotin 	if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
106261be82b0SDenis Bolotin 		return 0;
106361be82b0SDenis Bolotin 
106461be82b0SDenis Bolotin 	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
106561be82b0SDenis Bolotin 		p_hwfn->hw_info.multi_tc_roce_en = 0;
106661be82b0SDenis Bolotin 		DP_NOTICE(p_hwfn,
106761be82b0SDenis Bolotin 			  "multi-tc roce was disabled to reduce requested amount of pqs\n");
106861be82b0SDenis Bolotin 		if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
106961be82b0SDenis Bolotin 			return 0;
1070b5a9ee7cSAriel Elior 	}
1071fe56b9e6SYuval Mintz 
107261be82b0SDenis Bolotin 	DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
107361be82b0SDenis Bolotin 	return -EINVAL;
1074b5a9ee7cSAriel Elior }
1075fe56b9e6SYuval Mintz 
1076b5a9ee7cSAriel Elior static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
1077b5a9ee7cSAriel Elior {
1078b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1079b5a9ee7cSAriel Elior 	struct init_qm_vport_params *vport;
1080b5a9ee7cSAriel Elior 	struct init_qm_port_params *port;
1081b5a9ee7cSAriel Elior 	struct init_qm_pq_params *pq;
1082b5a9ee7cSAriel Elior 	int i, tc;
1083b5a9ee7cSAriel Elior 
1084b5a9ee7cSAriel Elior 	/* top level params */
1085b5a9ee7cSAriel Elior 	DP_VERBOSE(p_hwfn,
1086b5a9ee7cSAriel Elior 		   NETIF_MSG_HW,
108761be82b0SDenis Bolotin 		   "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
1088b5a9ee7cSAriel Elior 		   qm_info->start_pq,
1089b5a9ee7cSAriel Elior 		   qm_info->start_vport,
1090b5a9ee7cSAriel Elior 		   qm_info->pure_lb_pq,
109161be82b0SDenis Bolotin 		   qm_info->first_ofld_pq,
109261be82b0SDenis Bolotin 		   qm_info->first_llt_pq,
109361be82b0SDenis Bolotin 		   qm_info->pure_ack_pq);
1094b5a9ee7cSAriel Elior 	DP_VERBOSE(p_hwfn,
1095b5a9ee7cSAriel Elior 		   NETIF_MSG_HW,
1096b5a9ee7cSAriel Elior 		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
1097b5a9ee7cSAriel Elior 		   qm_info->ooo_pq,
1098b5a9ee7cSAriel Elior 		   qm_info->first_vf_pq,
1099b5a9ee7cSAriel Elior 		   qm_info->num_pqs,
1100b5a9ee7cSAriel Elior 		   qm_info->num_vf_pqs,
1101b5a9ee7cSAriel Elior 		   qm_info->num_vports, qm_info->max_phys_tcs_per_port);
1102b5a9ee7cSAriel Elior 	DP_VERBOSE(p_hwfn,
1103b5a9ee7cSAriel Elior 		   NETIF_MSG_HW,
1104b5a9ee7cSAriel Elior 		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
1105b5a9ee7cSAriel Elior 		   qm_info->pf_rl_en,
1106b5a9ee7cSAriel Elior 		   qm_info->pf_wfq_en,
1107b5a9ee7cSAriel Elior 		   qm_info->vport_rl_en,
1108b5a9ee7cSAriel Elior 		   qm_info->vport_wfq_en,
1109b5a9ee7cSAriel Elior 		   qm_info->pf_wfq,
1110b5a9ee7cSAriel Elior 		   qm_info->pf_rl,
1111b5a9ee7cSAriel Elior 		   qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
1112b5a9ee7cSAriel Elior 
1113b5a9ee7cSAriel Elior 	/* port table */
111478cea9ffSTomer Tayar 	for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
1115b5a9ee7cSAriel Elior 		port = &(qm_info->qm_port_params[i]);
1116b5a9ee7cSAriel Elior 		DP_VERBOSE(p_hwfn,
1117b5a9ee7cSAriel Elior 			   NETIF_MSG_HW,
1118b5a9ee7cSAriel Elior 			   "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
1119b5a9ee7cSAriel Elior 			   i,
1120b5a9ee7cSAriel Elior 			   port->active,
1121b5a9ee7cSAriel Elior 			   port->active_phys_tcs,
1122b5a9ee7cSAriel Elior 			   port->num_pbf_cmd_lines,
1123b5a9ee7cSAriel Elior 			   port->num_btb_blocks, port->reserved);
1124b5a9ee7cSAriel Elior 	}
1125b5a9ee7cSAriel Elior 
1126b5a9ee7cSAriel Elior 	/* vport table */
1127b5a9ee7cSAriel Elior 	for (i = 0; i < qm_info->num_vports; i++) {
1128b5a9ee7cSAriel Elior 		vport = &(qm_info->qm_vport_params[i]);
1129b5a9ee7cSAriel Elior 		DP_VERBOSE(p_hwfn,
1130b5a9ee7cSAriel Elior 			   NETIF_MSG_HW,
1131b5a9ee7cSAriel Elior 			   "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
1132b5a9ee7cSAriel Elior 			   qm_info->start_vport + i,
1133b5a9ee7cSAriel Elior 			   vport->vport_rl, vport->vport_wfq);
1134b5a9ee7cSAriel Elior 		for (tc = 0; tc < NUM_OF_TCS; tc++)
1135b5a9ee7cSAriel Elior 			DP_VERBOSE(p_hwfn,
1136b5a9ee7cSAriel Elior 				   NETIF_MSG_HW,
1137b5a9ee7cSAriel Elior 				   "%d ", vport->first_tx_pq_id[tc]);
1138b5a9ee7cSAriel Elior 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
1139b5a9ee7cSAriel Elior 	}
1140b5a9ee7cSAriel Elior 
1141b5a9ee7cSAriel Elior 	/* pq table */
1142b5a9ee7cSAriel Elior 	for (i = 0; i < qm_info->num_pqs; i++) {
1143b5a9ee7cSAriel Elior 		pq = &(qm_info->qm_pq_params[i]);
1144b5a9ee7cSAriel Elior 		DP_VERBOSE(p_hwfn,
1145b5a9ee7cSAriel Elior 			   NETIF_MSG_HW,
114650bc60cbSMichal Kalderon 			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
1147b5a9ee7cSAriel Elior 			   qm_info->start_pq + i,
114850bc60cbSMichal Kalderon 			   pq->port_id,
1149b5a9ee7cSAriel Elior 			   pq->vport_id,
1150b5a9ee7cSAriel Elior 			   pq->tc_id, pq->wrr_group, pq->rl_valid);
1151b5a9ee7cSAriel Elior 	}
1152b5a9ee7cSAriel Elior }
1153b5a9ee7cSAriel Elior 
1154b5a9ee7cSAriel Elior static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
1155b5a9ee7cSAriel Elior {
1156b5a9ee7cSAriel Elior 	/* reset params required for init run */
1157b5a9ee7cSAriel Elior 	qed_init_qm_reset_params(p_hwfn);
1158b5a9ee7cSAriel Elior 
1159b5a9ee7cSAriel Elior 	/* init QM top level params */
1160b5a9ee7cSAriel Elior 	qed_init_qm_params(p_hwfn);
1161b5a9ee7cSAriel Elior 
1162b5a9ee7cSAriel Elior 	/* init QM port params */
1163b5a9ee7cSAriel Elior 	qed_init_qm_port_params(p_hwfn);
1164b5a9ee7cSAriel Elior 
1165b5a9ee7cSAriel Elior 	/* init QM vport params */
1166b5a9ee7cSAriel Elior 	qed_init_qm_vport_params(p_hwfn);
1167b5a9ee7cSAriel Elior 
1168b5a9ee7cSAriel Elior 	/* init QM physical queue params */
1169b5a9ee7cSAriel Elior 	qed_init_qm_pq_params(p_hwfn);
1170b5a9ee7cSAriel Elior 
1171b5a9ee7cSAriel Elior 	/* display everything that was just initialized */
1172b5a9ee7cSAriel Elior 	qed_dp_init_qm_params(p_hwfn);
1173fe56b9e6SYuval Mintz }
1174fe56b9e6SYuval Mintz 
117539651abdSSudarsana Reddy Kalluru /* This function reconfigures the QM pf on the fly.
117639651abdSSudarsana Reddy Kalluru  * For this purpose we:
117739651abdSSudarsana Reddy Kalluru  * 1. reconfigure the QM database
1178a2e7699eSTomer Tayar  * 2. set new values to runtime array
117939651abdSSudarsana Reddy Kalluru  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
118039651abdSSudarsana Reddy Kalluru  * 4. activate init tool in QM_PF stage
118139651abdSSudarsana Reddy Kalluru  * 5. send an sdm_qm_cmd through rbc interface to release the QM
118239651abdSSudarsana Reddy Kalluru  */
118339651abdSSudarsana Reddy Kalluru int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
118439651abdSSudarsana Reddy Kalluru {
118539651abdSSudarsana Reddy Kalluru 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
118639651abdSSudarsana Reddy Kalluru 	bool b_rc;
118739651abdSSudarsana Reddy Kalluru 	int rc;
118839651abdSSudarsana Reddy Kalluru 
118939651abdSSudarsana Reddy Kalluru 	/* initialize qed's qm data structure */
1190b5a9ee7cSAriel Elior 	qed_init_qm_info(p_hwfn);
119139651abdSSudarsana Reddy Kalluru 
119239651abdSSudarsana Reddy Kalluru 	/* stop PF's qm queues */
119339651abdSSudarsana Reddy Kalluru 	spin_lock_bh(&qm_lock);
119439651abdSSudarsana Reddy Kalluru 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
119539651abdSSudarsana Reddy Kalluru 				    qm_info->start_pq, qm_info->num_pqs);
119639651abdSSudarsana Reddy Kalluru 	spin_unlock_bh(&qm_lock);
119739651abdSSudarsana Reddy Kalluru 	if (!b_rc)
119839651abdSSudarsana Reddy Kalluru 		return -EINVAL;
119939651abdSSudarsana Reddy Kalluru 
120039651abdSSudarsana Reddy Kalluru 	/* clear the QM_PF runtime phase leftovers from previous init */
120139651abdSSudarsana Reddy Kalluru 	qed_init_clear_rt_data(p_hwfn);
120239651abdSSudarsana Reddy Kalluru 
120339651abdSSudarsana Reddy Kalluru 	/* prepare QM portion of runtime array */
1204da090917STomer Tayar 	qed_qm_init_pf(p_hwfn, p_ptt, false);
120539651abdSSudarsana Reddy Kalluru 
120639651abdSSudarsana Reddy Kalluru 	/* activate init tool on runtime array */
120739651abdSSudarsana Reddy Kalluru 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
120839651abdSSudarsana Reddy Kalluru 			  p_hwfn->hw_info.hw_mode);
120939651abdSSudarsana Reddy Kalluru 	if (rc)
121039651abdSSudarsana Reddy Kalluru 		return rc;
121139651abdSSudarsana Reddy Kalluru 
121239651abdSSudarsana Reddy Kalluru 	/* start PF's qm queues */
121339651abdSSudarsana Reddy Kalluru 	spin_lock_bh(&qm_lock);
121439651abdSSudarsana Reddy Kalluru 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
121539651abdSSudarsana Reddy Kalluru 				    qm_info->start_pq, qm_info->num_pqs);
121639651abdSSudarsana Reddy Kalluru 	spin_unlock_bh(&qm_lock);
121739651abdSSudarsana Reddy Kalluru 	if (!b_rc)
121839651abdSSudarsana Reddy Kalluru 		return -EINVAL;
121939651abdSSudarsana Reddy Kalluru 
122039651abdSSudarsana Reddy Kalluru 	return 0;
122139651abdSSudarsana Reddy Kalluru }
122239651abdSSudarsana Reddy Kalluru 
1223b5a9ee7cSAriel Elior static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
1224b5a9ee7cSAriel Elior {
1225b5a9ee7cSAriel Elior 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1226b5a9ee7cSAriel Elior 	int rc;
1227b5a9ee7cSAriel Elior 
1228b5a9ee7cSAriel Elior 	rc = qed_init_qm_sanity(p_hwfn);
1229b5a9ee7cSAriel Elior 	if (rc)
1230b5a9ee7cSAriel Elior 		goto alloc_err;
1231b5a9ee7cSAriel Elior 
12326396bb22SKees Cook 	qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
12336396bb22SKees Cook 					sizeof(*qm_info->qm_pq_params),
1234b5a9ee7cSAriel Elior 					GFP_KERNEL);
1235b5a9ee7cSAriel Elior 	if (!qm_info->qm_pq_params)
1236b5a9ee7cSAriel Elior 		goto alloc_err;
1237b5a9ee7cSAriel Elior 
12386396bb22SKees Cook 	qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
12396396bb22SKees Cook 					   sizeof(*qm_info->qm_vport_params),
1240b5a9ee7cSAriel Elior 					   GFP_KERNEL);
1241b5a9ee7cSAriel Elior 	if (!qm_info->qm_vport_params)
1242b5a9ee7cSAriel Elior 		goto alloc_err;
1243b5a9ee7cSAriel Elior 
12446396bb22SKees Cook 	qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
12456396bb22SKees Cook 					  sizeof(*qm_info->qm_port_params),
1246b5a9ee7cSAriel Elior 					  GFP_KERNEL);
1247b5a9ee7cSAriel Elior 	if (!qm_info->qm_port_params)
1248b5a9ee7cSAriel Elior 		goto alloc_err;
1249b5a9ee7cSAriel Elior 
12506396bb22SKees Cook 	qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
12516396bb22SKees Cook 				    sizeof(*qm_info->wfq_data),
1252b5a9ee7cSAriel Elior 				    GFP_KERNEL);
1253b5a9ee7cSAriel Elior 	if (!qm_info->wfq_data)
1254b5a9ee7cSAriel Elior 		goto alloc_err;
1255b5a9ee7cSAriel Elior 
1256b5a9ee7cSAriel Elior 	return 0;
1257b5a9ee7cSAriel Elior 
1258b5a9ee7cSAriel Elior alloc_err:
1259b5a9ee7cSAriel Elior 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
1260b5a9ee7cSAriel Elior 	qed_qm_info_free(p_hwfn);
1261b5a9ee7cSAriel Elior 	return -ENOMEM;
1262b5a9ee7cSAriel Elior }
1263b5a9ee7cSAriel Elior 
1264fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev)
1265fe56b9e6SYuval Mintz {
1266f9dc4d1fSRam Amrani 	u32 rdma_tasks, excess_tasks;
1267f9dc4d1fSRam Amrani 	u32 line_count;
1268fe56b9e6SYuval Mintz 	int i, rc = 0;
1269fe56b9e6SYuval Mintz 
12700db711bbSMintz, Yuval 	if (IS_VF(cdev)) {
12710db711bbSMintz, Yuval 		for_each_hwfn(cdev, i) {
12720db711bbSMintz, Yuval 			rc = qed_l2_alloc(&cdev->hwfns[i]);
12730db711bbSMintz, Yuval 			if (rc)
12741408cc1fSYuval Mintz 				return rc;
12750db711bbSMintz, Yuval 		}
12760db711bbSMintz, Yuval 		return rc;
12770db711bbSMintz, Yuval 	}
12781408cc1fSYuval Mintz 
1279fe56b9e6SYuval Mintz 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
1280fe56b9e6SYuval Mintz 	if (!cdev->fw_data)
1281fe56b9e6SYuval Mintz 		return -ENOMEM;
1282fe56b9e6SYuval Mintz 
1283fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1284fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1285dbb799c3SYuval Mintz 		u32 n_eqes, num_cons;
1286fe56b9e6SYuval Mintz 
128736907cd5SAriel Elior 		/* Initialize the doorbell recovery mechanism */
128836907cd5SAriel Elior 		rc = qed_db_recovery_setup(p_hwfn);
128936907cd5SAriel Elior 		if (rc)
129036907cd5SAriel Elior 			goto alloc_err;
129136907cd5SAriel Elior 
1292fe56b9e6SYuval Mintz 		/* First allocate the context manager structure */
1293fe56b9e6SYuval Mintz 		rc = qed_cxt_mngr_alloc(p_hwfn);
1294fe56b9e6SYuval Mintz 		if (rc)
1295fe56b9e6SYuval Mintz 			goto alloc_err;
1296fe56b9e6SYuval Mintz 
1297fe56b9e6SYuval Mintz 		/* Set the HW cid/tid numbers (in the context manager).
1298fe56b9e6SYuval Mintz 		 * Must be done prior to any further computations.
1299fe56b9e6SYuval Mintz 		 */
1300f9dc4d1fSRam Amrani 		rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
1301fe56b9e6SYuval Mintz 		if (rc)
1302fe56b9e6SYuval Mintz 			goto alloc_err;
1303fe56b9e6SYuval Mintz 
1304b5a9ee7cSAriel Elior 		rc = qed_alloc_qm_data(p_hwfn);
1305fe56b9e6SYuval Mintz 		if (rc)
1306fe56b9e6SYuval Mintz 			goto alloc_err;
1307fe56b9e6SYuval Mintz 
1308b5a9ee7cSAriel Elior 		/* init qm info */
1309b5a9ee7cSAriel Elior 		qed_init_qm_info(p_hwfn);
1310b5a9ee7cSAriel Elior 
1311fe56b9e6SYuval Mintz 		/* Compute the ILT client partition */
1312f9dc4d1fSRam Amrani 		rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
1313f9dc4d1fSRam Amrani 		if (rc) {
1314f9dc4d1fSRam Amrani 			DP_NOTICE(p_hwfn,
1315f9dc4d1fSRam Amrani 				  "too many ILT lines; re-computing with fewer lines\n");
1316f9dc4d1fSRam Amrani 			/* In case there are not enough ILT lines we reduce the
1317f9dc4d1fSRam Amrani 			 * number of RDMA tasks and re-compute.
1318f9dc4d1fSRam Amrani 			 */
1319f9dc4d1fSRam Amrani 			excess_tasks =
1320f9dc4d1fSRam Amrani 			    qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
1321f9dc4d1fSRam Amrani 			if (!excess_tasks)
1322f9dc4d1fSRam Amrani 				goto alloc_err;
1323f9dc4d1fSRam Amrani 
1324f9dc4d1fSRam Amrani 			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
1325f9dc4d1fSRam Amrani 			rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
1326fe56b9e6SYuval Mintz 			if (rc)
1327fe56b9e6SYuval Mintz 				goto alloc_err;
1328fe56b9e6SYuval Mintz 
1329f9dc4d1fSRam Amrani 			rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
1330f9dc4d1fSRam Amrani 			if (rc) {
1331f9dc4d1fSRam Amrani 				DP_ERR(p_hwfn,
1332f9dc4d1fSRam Amrani 				       "failed ILT compute. Requested too many lines: %u\n",
1333f9dc4d1fSRam Amrani 				       line_count);
1334f9dc4d1fSRam Amrani 
1335f9dc4d1fSRam Amrani 				goto alloc_err;
1336f9dc4d1fSRam Amrani 			}
1337f9dc4d1fSRam Amrani 		}
1338f9dc4d1fSRam Amrani 
1339fe56b9e6SYuval Mintz 		/* CID map / ILT shadow table / T2
1340fe56b9e6SYuval Mintz 		 * The table sizes are determined by the computations above
1341fe56b9e6SYuval Mintz 		 */
1342fe56b9e6SYuval Mintz 		rc = qed_cxt_tables_alloc(p_hwfn);
1343fe56b9e6SYuval Mintz 		if (rc)
1344fe56b9e6SYuval Mintz 			goto alloc_err;
1345fe56b9e6SYuval Mintz 
1346fe56b9e6SYuval Mintz 		/* SPQ, must follow ILT because initializes SPQ context */
1347fe56b9e6SYuval Mintz 		rc = qed_spq_alloc(p_hwfn);
1348fe56b9e6SYuval Mintz 		if (rc)
1349fe56b9e6SYuval Mintz 			goto alloc_err;
1350fe56b9e6SYuval Mintz 
1351fe56b9e6SYuval Mintz 		/* SP status block allocation */
1352fe56b9e6SYuval Mintz 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
1353fe56b9e6SYuval Mintz 							 RESERVED_PTT_DPC);
1354fe56b9e6SYuval Mintz 
1355fe56b9e6SYuval Mintz 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
1356fe56b9e6SYuval Mintz 		if (rc)
1357fe56b9e6SYuval Mintz 			goto alloc_err;
1358fe56b9e6SYuval Mintz 
135932a47e72SYuval Mintz 		rc = qed_iov_alloc(p_hwfn);
136032a47e72SYuval Mintz 		if (rc)
136132a47e72SYuval Mintz 			goto alloc_err;
136232a47e72SYuval Mintz 
1363fe56b9e6SYuval Mintz 		/* EQ */
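		/* Size the EQ to the SPQ capacity, plus room for the
		 * per-connection asynchronous events of the offload
		 * protocols (RDMA / iSCSI) computed below.
		 */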
1364dbb799c3SYuval Mintz 		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
1365c851a9dcSKalderon, Michal 		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
136667b40dccSKalderon, Michal 			enum protocol_type rdma_proto;
136767b40dccSKalderon, Michal 
136867b40dccSKalderon, Michal 			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
136967b40dccSKalderon, Michal 				rdma_proto = PROTOCOLID_ROCE;
137067b40dccSKalderon, Michal 			else
137167b40dccSKalderon, Michal 				rdma_proto = PROTOCOLID_IWARP;
137267b40dccSKalderon, Michal 
1373dbb799c3SYuval Mintz 			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
137467b40dccSKalderon, Michal 							       rdma_proto,
13758c93beafSYuval Mintz 							       NULL) * 2;
1376dbb799c3SYuval Mintz 			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
1377dbb799c3SYuval Mintz 		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
1378dbb799c3SYuval Mintz 			num_cons =
1379dbb799c3SYuval Mintz 			    qed_cxt_get_proto_cid_count(p_hwfn,
13808c93beafSYuval Mintz 							PROTOCOLID_ISCSI,
13818c93beafSYuval Mintz 							NULL);
1382dbb799c3SYuval Mintz 			n_eqes += 2 * num_cons;
1383dbb799c3SYuval Mintz 		}
1384dbb799c3SYuval Mintz 
1385dbb799c3SYuval Mintz 		if (n_eqes > 0xFFFF) {
1386dbb799c3SYuval Mintz 			DP_ERR(p_hwfn,
1387dbb799c3SYuval Mintz 			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
1388dbb799c3SYuval Mintz 			       n_eqes, 0xFFFF);
13893587cb87STomer Tayar 			goto alloc_no_mem;
13909b15acbfSDan Carpenter 		}
1391dbb799c3SYuval Mintz 
13923587cb87STomer Tayar 		rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
13933587cb87STomer Tayar 		if (rc)
13943587cb87STomer Tayar 			goto alloc_err;
1395fe56b9e6SYuval Mintz 
13963587cb87STomer Tayar 		rc = qed_consq_alloc(p_hwfn);
13973587cb87STomer Tayar 		if (rc)
13983587cb87STomer Tayar 			goto alloc_err;
1399fe56b9e6SYuval Mintz 
14000db711bbSMintz, Yuval 		rc = qed_l2_alloc(p_hwfn);
14010db711bbSMintz, Yuval 		if (rc)
14020db711bbSMintz, Yuval 			goto alloc_err;
14030db711bbSMintz, Yuval 
14040a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2
14050a7fb11cSYuval Mintz 		if (p_hwfn->using_ll2) {
14063587cb87STomer Tayar 			rc = qed_ll2_alloc(p_hwfn);
14073587cb87STomer Tayar 			if (rc)
14083587cb87STomer Tayar 				goto alloc_err;
14090a7fb11cSYuval Mintz 		}
14100a7fb11cSYuval Mintz #endif
14111e128c81SArun Easi 
14121e128c81SArun Easi 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
14133587cb87STomer Tayar 			rc = qed_fcoe_alloc(p_hwfn);
14143587cb87STomer Tayar 			if (rc)
14153587cb87STomer Tayar 				goto alloc_err;
14161e128c81SArun Easi 		}
14171e128c81SArun Easi 
1418fc831825SYuval Mintz 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
14193587cb87STomer Tayar 			rc = qed_iscsi_alloc(p_hwfn);
14203587cb87STomer Tayar 			if (rc)
14213587cb87STomer Tayar 				goto alloc_err;
14223587cb87STomer Tayar 			rc = qed_ooo_alloc(p_hwfn);
14233587cb87STomer Tayar 			if (rc)
14243587cb87STomer Tayar 				goto alloc_err;
1425fc831825SYuval Mintz 		}
14260a7fb11cSYuval Mintz 
1427291d57f6SMichal Kalderon 		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
1428291d57f6SMichal Kalderon 			rc = qed_rdma_info_alloc(p_hwfn);
1429291d57f6SMichal Kalderon 			if (rc)
1430291d57f6SMichal Kalderon 				goto alloc_err;
1431291d57f6SMichal Kalderon 		}
1432291d57f6SMichal Kalderon 
1433fe56b9e6SYuval Mintz 		/* DMA info initialization */
1434fe56b9e6SYuval Mintz 		rc = qed_dmae_info_alloc(p_hwfn);
14352591c280SJoe Perches 		if (rc)
1436fe56b9e6SYuval Mintz 			goto alloc_err;
143739651abdSSudarsana Reddy Kalluru 
143839651abdSSudarsana Reddy Kalluru 		/* DCBX initialization */
143939651abdSSudarsana Reddy Kalluru 		rc = qed_dcbx_info_alloc(p_hwfn);
14402591c280SJoe Perches 		if (rc)
144139651abdSSudarsana Reddy Kalluru 			goto alloc_err;
1442a3f72307SDenis Bolotin 
1443a3f72307SDenis Bolotin 		rc = qed_dbg_alloc_user_data(p_hwfn);
1444a3f72307SDenis Bolotin 		if (rc)
1445a3f72307SDenis Bolotin 			goto alloc_err;
144639651abdSSudarsana Reddy Kalluru 	}
1447fe56b9e6SYuval Mintz 
1448fe56b9e6SYuval Mintz 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
14492591c280SJoe Perches 	if (!cdev->reset_stats)
145083aeb933SYuval Mintz 		goto alloc_no_mem;
1451fe56b9e6SYuval Mintz 
1452fe56b9e6SYuval Mintz 	return 0;
1453fe56b9e6SYuval Mintz 
1454dbb799c3SYuval Mintz alloc_no_mem:
1455dbb799c3SYuval Mintz 	rc = -ENOMEM;
1456fe56b9e6SYuval Mintz alloc_err:
1457fe56b9e6SYuval Mintz 	qed_resc_free(cdev);
1458fe56b9e6SYuval Mintz 	return rc;
1459fe56b9e6SYuval Mintz }
1460fe56b9e6SYuval Mintz 
1461fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev)
1462fe56b9e6SYuval Mintz {
1463fe56b9e6SYuval Mintz 	int i;
1464fe56b9e6SYuval Mintz 
14650db711bbSMintz, Yuval 	if (IS_VF(cdev)) {
14660db711bbSMintz, Yuval 		for_each_hwfn(cdev, i)
14670db711bbSMintz, Yuval 			qed_l2_setup(&cdev->hwfns[i]);
14681408cc1fSYuval Mintz 		return;
14690db711bbSMintz, Yuval 	}
14701408cc1fSYuval Mintz 
1471fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1472fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1473fe56b9e6SYuval Mintz 
1474fe56b9e6SYuval Mintz 		qed_cxt_mngr_setup(p_hwfn);
1475fe56b9e6SYuval Mintz 		qed_spq_setup(p_hwfn);
14763587cb87STomer Tayar 		qed_eq_setup(p_hwfn);
14773587cb87STomer Tayar 		qed_consq_setup(p_hwfn);
1478fe56b9e6SYuval Mintz 
1479fe56b9e6SYuval Mintz 		/* Read shadow of current MFW mailbox */
1480fe56b9e6SYuval Mintz 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
1481fe56b9e6SYuval Mintz 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
1482fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_cur,
1483fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_length);
1484fe56b9e6SYuval Mintz 
1485fe56b9e6SYuval Mintz 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
148632a47e72SYuval Mintz 
14870db711bbSMintz, Yuval 		qed_l2_setup(p_hwfn);
14881ee240e3SMintz, Yuval 		qed_iov_setup(p_hwfn);
14890a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2
14900a7fb11cSYuval Mintz 		if (p_hwfn->using_ll2)
14913587cb87STomer Tayar 			qed_ll2_setup(p_hwfn);
14920a7fb11cSYuval Mintz #endif
14931e128c81SArun Easi 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
14943587cb87STomer Tayar 			qed_fcoe_setup(p_hwfn);
14951e128c81SArun Easi 
14961d6cff4fSYuval Mintz 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
14973587cb87STomer Tayar 			qed_iscsi_setup(p_hwfn);
14983587cb87STomer Tayar 			qed_ooo_setup(p_hwfn);
14991d6cff4fSYuval Mintz 		}
1500fe56b9e6SYuval Mintz 	}
1501fe56b9e6SYuval Mintz }
1502fe56b9e6SYuval Mintz 
1503fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT          (100)
1504fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME         (10)
1505fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn,
15060b55e27dSYuval Mintz 		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
1507fe56b9e6SYuval Mintz {
1508fe56b9e6SYuval Mintz 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
1509fe56b9e6SYuval Mintz 	int rc = -EBUSY;
1510fe56b9e6SYuval Mintz 
1511fc48b7a6SYuval Mintz 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
1512fc48b7a6SYuval Mintz 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
1513fe56b9e6SYuval Mintz 
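	/* VF ids appear to follow the PF ids in the FW function-id space,
	 * hence the 0x10 offset applied below.
	 */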
15140b55e27dSYuval Mintz 	if (is_vf)
15150b55e27dSYuval Mintz 		id += 0x10;
15160b55e27dSYuval Mintz 
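	/* Build an SDM op-gen command: an aggregated-interrupt completion
	 * carrying the (possibly VF-adjusted) function id in its vector
	 * bit field.
	 */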
1517fc48b7a6SYuval Mintz 	command |= X_FINAL_CLEANUP_AGG_INT <<
1518fc48b7a6SYuval Mintz 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
1519fc48b7a6SYuval Mintz 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
1520fc48b7a6SYuval Mintz 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
1521fc48b7a6SYuval Mintz 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
1522fe56b9e6SYuval Mintz 
1523fe56b9e6SYuval Mintz 	/* Make sure notification is not set before initiating final cleanup */
1524fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr)) {
15251a635e48SYuval Mintz 		DP_NOTICE(p_hwfn,
1526fe56b9e6SYuval Mintz 			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
1527fe56b9e6SYuval Mintz 		REG_WR(p_hwfn, addr, 0);
1528fe56b9e6SYuval Mintz 	}
1529fe56b9e6SYuval Mintz 
1530fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1531d602de8eSJoe Perches 		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
1532fe56b9e6SYuval Mintz 		   id, command);
1533fe56b9e6SYuval Mintz 
1534fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
1535fe56b9e6SYuval Mintz 
1536fe56b9e6SYuval Mintz 	/* Poll until completion */
1537fe56b9e6SYuval Mintz 	while (!REG_RD(p_hwfn, addr) && count--)
1538fe56b9e6SYuval Mintz 		msleep(FINAL_CLEANUP_POLL_TIME);
1539fe56b9e6SYuval Mintz 
1540fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr))
1541fe56b9e6SYuval Mintz 		rc = 0;
1542fe56b9e6SYuval Mintz 	else
1543fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
1544fe56b9e6SYuval Mintz 			  "Failed to receive FW final cleanup notification\n");
1545fe56b9e6SYuval Mintz 
1546fe56b9e6SYuval Mintz 	/* Cleanup afterwards */
1547fe56b9e6SYuval Mintz 	REG_WR(p_hwfn, addr, 0);
1548fe56b9e6SYuval Mintz 
1549fe56b9e6SYuval Mintz 	return rc;
1550fe56b9e6SYuval Mintz }
1551fe56b9e6SYuval Mintz 
15529c79ddaaSMintz, Yuval static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
1553fe56b9e6SYuval Mintz {
1554fe56b9e6SYuval Mintz 	int hw_mode = 0;
1555fe56b9e6SYuval Mintz 
15569c79ddaaSMintz, Yuval 	if (QED_IS_BB_B0(p_hwfn->cdev)) {
15579c79ddaaSMintz, Yuval 		hw_mode |= 1 << MODE_BB;
15589c79ddaaSMintz, Yuval 	} else if (QED_IS_AH(p_hwfn->cdev)) {
15599c79ddaaSMintz, Yuval 		hw_mode |= 1 << MODE_K2;
15609c79ddaaSMintz, Yuval 	} else {
15619c79ddaaSMintz, Yuval 		DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
15629c79ddaaSMintz, Yuval 			  p_hwfn->cdev->type);
15639c79ddaaSMintz, Yuval 		return -EINVAL;
15649c79ddaaSMintz, Yuval 	}
1565fe56b9e6SYuval Mintz 
156678cea9ffSTomer Tayar 	switch (p_hwfn->cdev->num_ports_in_engine) {
1567fe56b9e6SYuval Mintz 	case 1:
1568fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
1569fe56b9e6SYuval Mintz 		break;
1570fe56b9e6SYuval Mintz 	case 2:
1571fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
1572fe56b9e6SYuval Mintz 		break;
1573fe56b9e6SYuval Mintz 	case 4:
1574fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
1575fe56b9e6SYuval Mintz 		break;
1576fe56b9e6SYuval Mintz 	default:
1577fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
157878cea9ffSTomer Tayar 			  p_hwfn->cdev->num_ports_in_engine);
15799c79ddaaSMintz, Yuval 		return -EINVAL;
1580fe56b9e6SYuval Mintz 	}
1581fe56b9e6SYuval Mintz 
15820bc5fe85SSudarsana Reddy Kalluru 	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
1583fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SD;
15840bc5fe85SSudarsana Reddy Kalluru 	else
1585fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
1586fe56b9e6SYuval Mintz 
1587fe56b9e6SYuval Mintz 	hw_mode |= 1 << MODE_ASIC;
1588fe56b9e6SYuval Mintz 
15891af9dcf7SYuval Mintz 	if (p_hwfn->cdev->num_hwfns > 1)
15901af9dcf7SYuval Mintz 		hw_mode |= 1 << MODE_100G;
15911af9dcf7SYuval Mintz 
1592fe56b9e6SYuval Mintz 	p_hwfn->hw_info.hw_mode = hw_mode;
15931af9dcf7SYuval Mintz 
15941af9dcf7SYuval Mintz 	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
15951af9dcf7SYuval Mintz 		   "Configuring function for hw_mode: 0x%08x\n",
15961af9dcf7SYuval Mintz 		   p_hwfn->hw_info.hw_mode);
15979c79ddaaSMintz, Yuval 
15989c79ddaaSMintz, Yuval 	return 0;
1599fe56b9e6SYuval Mintz }
1600fe56b9e6SYuval Mintz 
1601fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
1602fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
1603fe56b9e6SYuval Mintz {
1604fe56b9e6SYuval Mintz 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
1605d031548eSMintz, Yuval 	int i, igu_sb_id;
1606fe56b9e6SYuval Mintz 
1607fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1608fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1609fe56b9e6SYuval Mintz 		struct qed_igu_info *p_igu_info;
1610fe56b9e6SYuval Mintz 		struct qed_igu_block *p_block;
1611fe56b9e6SYuval Mintz 		struct cau_sb_entry sb_entry;
1612fe56b9e6SYuval Mintz 
1613fe56b9e6SYuval Mintz 		p_igu_info = p_hwfn->hw_info.p_igu_info;
1614fe56b9e6SYuval Mintz 
1615d031548eSMintz, Yuval 		for (igu_sb_id = 0;
1616d031548eSMintz, Yuval 		     igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
1617d031548eSMintz, Yuval 			p_block = &p_igu_info->entry[igu_sb_id];
1618d031548eSMintz, Yuval 
1619fe56b9e6SYuval Mintz 			if (!p_block->is_pf)
1620fe56b9e6SYuval Mintz 				continue;
1621fe56b9e6SYuval Mintz 
1622fe56b9e6SYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
16231a635e48SYuval Mintz 					      p_block->function_id, 0, 0);
1624d031548eSMintz, Yuval 			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
1625d031548eSMintz, Yuval 					 sb_entry);
1626fe56b9e6SYuval Mintz 		}
1627fe56b9e6SYuval Mintz 	}
1628fe56b9e6SYuval Mintz }
1629fe56b9e6SYuval Mintz 
163060afed72STomer Tayar static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
163160afed72STomer Tayar 				     struct qed_ptt *p_ptt)
163260afed72STomer Tayar {
163360afed72STomer Tayar 	u32 val, wr_mbs, cache_line_size;
163460afed72STomer Tayar 
163560afed72STomer Tayar 	val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
163660afed72STomer Tayar 	switch (val) {
163760afed72STomer Tayar 	case 0:
163860afed72STomer Tayar 		wr_mbs = 128;
163960afed72STomer Tayar 		break;
164060afed72STomer Tayar 	case 1:
164160afed72STomer Tayar 		wr_mbs = 256;
164260afed72STomer Tayar 		break;
164360afed72STomer Tayar 	case 2:
164460afed72STomer Tayar 		wr_mbs = 512;
164560afed72STomer Tayar 		break;
164660afed72STomer Tayar 	default:
164760afed72STomer Tayar 		DP_INFO(p_hwfn,
164860afed72STomer Tayar 			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
164960afed72STomer Tayar 			val);
165060afed72STomer Tayar 		return;
165160afed72STomer Tayar 	}
165260afed72STomer Tayar 
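	/* PGLUE_B_REG_CACHE_LINE_SIZE encodes the line size as
	 * log2(bytes) - 5 (32B -> 0, 64B -> 1, ...); the configured size
	 * is capped at the write MBS.
	 */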
165360afed72STomer Tayar 	cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
165460afed72STomer Tayar 	switch (cache_line_size) {
165560afed72STomer Tayar 	case 32:
165660afed72STomer Tayar 		val = 0;
165760afed72STomer Tayar 		break;
165860afed72STomer Tayar 	case 64:
165960afed72STomer Tayar 		val = 1;
166060afed72STomer Tayar 		break;
166160afed72STomer Tayar 	case 128:
166260afed72STomer Tayar 		val = 2;
166360afed72STomer Tayar 		break;
166460afed72STomer Tayar 	case 256:
166560afed72STomer Tayar 		val = 3;
166660afed72STomer Tayar 		break;
166760afed72STomer Tayar 	default:
166860afed72STomer Tayar 		DP_INFO(p_hwfn,
166960afed72STomer Tayar 			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
167060afed72STomer Tayar 			cache_line_size);
167160afed72STomer Tayar 	}
167260afed72STomer Tayar 
167360afed72STomer Tayar 	if (L1_CACHE_BYTES > wr_mbs)
167460afed72STomer Tayar 		DP_INFO(p_hwfn,
167560afed72STomer Tayar 			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
167660afed72STomer Tayar 			L1_CACHE_BYTES, wr_mbs);
167760afed72STomer Tayar 
167860afed72STomer Tayar 	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
1679fc6575bcSMintz, Yuval 	if (val > 0) {
1680fc6575bcSMintz, Yuval 		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
1681fc6575bcSMintz, Yuval 		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
1682fc6575bcSMintz, Yuval 	}
168360afed72STomer Tayar }
168460afed72STomer Tayar 
1685fe56b9e6SYuval Mintz static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
16861a635e48SYuval Mintz 			      struct qed_ptt *p_ptt, int hw_mode)
1687fe56b9e6SYuval Mintz {
1688fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1689fe56b9e6SYuval Mintz 	struct qed_qm_common_rt_init_params params;
1690fe56b9e6SYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
16919c79ddaaSMintz, Yuval 	u8 vf_id, max_num_vfs;
1692dbb799c3SYuval Mintz 	u16 num_pfs, pf_id;
16931408cc1fSYuval Mintz 	u32 concrete_fid;
1694fe56b9e6SYuval Mintz 	int rc = 0;
1695fe56b9e6SYuval Mintz 
1696fe56b9e6SYuval Mintz 	qed_init_cau_rt_data(cdev);
1697fe56b9e6SYuval Mintz 
1698fe56b9e6SYuval Mintz 	/* Program GTT windows */
1699fe56b9e6SYuval Mintz 	qed_gtt_init(p_hwfn);
1700fe56b9e6SYuval Mintz 
1701fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
1702fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
1703c7281d59SGustavo A. R. Silva 			qm_info->pf_rl_en = true;
1704fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
1705c7281d59SGustavo A. R. Silva 			qm_info->pf_wfq_en = true;
1706fe56b9e6SYuval Mintz 	}
1707fe56b9e6SYuval Mintz 
1708fe56b9e6SYuval Mintz 	memset(&params, 0, sizeof(params));
170978cea9ffSTomer Tayar 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
1710fe56b9e6SYuval Mintz 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1711fe56b9e6SYuval Mintz 	params.pf_rl_en = qm_info->pf_rl_en;
1712fe56b9e6SYuval Mintz 	params.pf_wfq_en = qm_info->pf_wfq_en;
1713fe56b9e6SYuval Mintz 	params.vport_rl_en = qm_info->vport_rl_en;
1714fe56b9e6SYuval Mintz 	params.vport_wfq_en = qm_info->vport_wfq_en;
1715fe56b9e6SYuval Mintz 	params.port_params = qm_info->qm_port_params;
1716fe56b9e6SYuval Mintz 
1717fe56b9e6SYuval Mintz 	qed_qm_common_rt_init(p_hwfn, &params);
1718fe56b9e6SYuval Mintz 
1719fe56b9e6SYuval Mintz 	qed_cxt_hw_init_common(p_hwfn);
1720fe56b9e6SYuval Mintz 
172160afed72STomer Tayar 	qed_init_cache_line_size(p_hwfn, p_ptt);
172260afed72STomer Tayar 
1723fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
17241a635e48SYuval Mintz 	if (rc)
1725fe56b9e6SYuval Mintz 		return rc;
1726fe56b9e6SYuval Mintz 
1727fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
1728fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
1729fe56b9e6SYuval Mintz 
1730dbb799c3SYuval Mintz 	if (QED_IS_BB(p_hwfn->cdev)) {
1731dbb799c3SYuval Mintz 		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
1732dbb799c3SYuval Mintz 		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
1733dbb799c3SYuval Mintz 			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
1734dbb799c3SYuval Mintz 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1735dbb799c3SYuval Mintz 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1736dbb799c3SYuval Mintz 		}
1737dbb799c3SYuval Mintz 		/* pretend to original PF */
1738dbb799c3SYuval Mintz 		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
1739dbb799c3SYuval Mintz 	}
1740fe56b9e6SYuval Mintz 
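	/* Pretend to each possible VF in turn so its CCFC/TCFC strong/weak
	 * enable bits can be configured, then revert to the original PF.
	 */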
17419c79ddaaSMintz, Yuval 	max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
17429c79ddaaSMintz, Yuval 	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
17431408cc1fSYuval Mintz 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
17441408cc1fSYuval Mintz 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
17451408cc1fSYuval Mintz 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
174605fafbfbSYuval Mintz 		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
174705fafbfbSYuval Mintz 		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
174805fafbfbSYuval Mintz 		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
17491408cc1fSYuval Mintz 	}
17501408cc1fSYuval Mintz 	/* pretend to original PF */
17511408cc1fSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
17521408cc1fSYuval Mintz 
1753fe56b9e6SYuval Mintz 	return rc;
1754fe56b9e6SYuval Mintz }
1755fe56b9e6SYuval Mintz 
175651ff1725SRam Amrani static int
175751ff1725SRam Amrani qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
175851ff1725SRam Amrani 		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
175951ff1725SRam Amrani {
1760107392b7SRam Amrani 	u32 dpi_bit_shift, dpi_count, dpi_page_size;
176151ff1725SRam Amrani 	u32 min_dpis;
1762107392b7SRam Amrani 	u32 n_wids;
176351ff1725SRam Amrani 
176451ff1725SRam Amrani 	/* Calculate DPI size */
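	/* Each WID is QED_WID_SIZE bytes; a DPI must hold at least
	 * QED_MIN_WIDS of them (ideally one per CPU), rounded up to a power
	 * of two and then to a whole number of OS pages.
	 */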
1765107392b7SRam Amrani 	n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
1766107392b7SRam Amrani 	dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
1767107392b7SRam Amrani 	dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
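	/* The DORQ register expresses the DPI size as a log2 shift of
	 * 4KB units.
	 */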
176851ff1725SRam Amrani 	dpi_bit_shift = ilog2(dpi_page_size / 4096);
176951ff1725SRam Amrani 	dpi_count = pwm_region_size / dpi_page_size;
177051ff1725SRam Amrani 
177151ff1725SRam Amrani 	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
177251ff1725SRam Amrani 	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
177351ff1725SRam Amrani 
177451ff1725SRam Amrani 	p_hwfn->dpi_size = dpi_page_size;
177551ff1725SRam Amrani 	p_hwfn->dpi_count = dpi_count;
177651ff1725SRam Amrani 
177751ff1725SRam Amrani 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
177851ff1725SRam Amrani 
177951ff1725SRam Amrani 	if (dpi_count < min_dpis)
178051ff1725SRam Amrani 		return -EINVAL;
178151ff1725SRam Amrani 
178251ff1725SRam Amrani 	return 0;
178351ff1725SRam Amrani }
178451ff1725SRam Amrani 
178551ff1725SRam Amrani enum QED_ROCE_EDPM_MODE {
178651ff1725SRam Amrani 	QED_ROCE_EDPM_MODE_ENABLE = 0,
178751ff1725SRam Amrani 	QED_ROCE_EDPM_MODE_FORCE_ON = 1,
178851ff1725SRam Amrani 	QED_ROCE_EDPM_MODE_DISABLE = 2,
178951ff1725SRam Amrani };
179051ff1725SRam Amrani 
1791a1b469b8SAriel Elior bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
1792a1b469b8SAriel Elior {
1793a1b469b8SAriel Elior 	if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
1794a1b469b8SAriel Elior 		return false;
1795a1b469b8SAriel Elior 
1796a1b469b8SAriel Elior 	return true;
1797a1b469b8SAriel Elior }
1798a1b469b8SAriel Elior 
179951ff1725SRam Amrani static int
180051ff1725SRam Amrani qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
180151ff1725SRam Amrani {
180251ff1725SRam Amrani 	u32 pwm_regsize, norm_regsize;
180351ff1725SRam Amrani 	u32 non_pwm_conn, min_addr_reg1;
180420b1bd96SRam Amrani 	u32 db_bar_size, n_cpus = 1;
180551ff1725SRam Amrani 	u32 roce_edpm_mode;
180651ff1725SRam Amrani 	u32 pf_dems_shift;
180751ff1725SRam Amrani 	int rc = 0;
180851ff1725SRam Amrani 	u8 cond;
180951ff1725SRam Amrani 
181015582962SRahul Verma 	db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
181151ff1725SRam Amrani 	if (p_hwfn->cdev->num_hwfns > 1)
181251ff1725SRam Amrani 		db_bar_size /= 2;
181351ff1725SRam Amrani 
181451ff1725SRam Amrani 	/* Calculate doorbell regions */
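	/* The normal (non-PWM) region needs one DEMS-sized doorbell per
	 * core/L2 connection, rounded up to a page; whatever remains of the
	 * BAR becomes the PWM (DPI) region.
	 */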
181551ff1725SRam Amrani 	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
181651ff1725SRam Amrani 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
181751ff1725SRam Amrani 						   NULL) +
181851ff1725SRam Amrani 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
181951ff1725SRam Amrani 						   NULL);
1820a82dadbcSRam Amrani 	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
182151ff1725SRam Amrani 	min_addr_reg1 = norm_regsize / 4096;
182251ff1725SRam Amrani 	pwm_regsize = db_bar_size - norm_regsize;
182351ff1725SRam Amrani 
182451ff1725SRam Amrani 	/* Check that the normal and PWM sizes are valid */
182551ff1725SRam Amrani 	if (db_bar_size < norm_regsize) {
182651ff1725SRam Amrani 		DP_ERR(p_hwfn->cdev,
182751ff1725SRam Amrani 		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x)\n",
182851ff1725SRam Amrani 		       db_bar_size, norm_regsize);
182951ff1725SRam Amrani 		return -EINVAL;
183051ff1725SRam Amrani 	}
183151ff1725SRam Amrani 
183251ff1725SRam Amrani 	if (pwm_regsize < QED_MIN_PWM_REGION) {
183351ff1725SRam Amrani 		DP_ERR(p_hwfn->cdev,
183451ff1725SRam Amrani 		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
183551ff1725SRam Amrani 		       pwm_regsize,
183651ff1725SRam Amrani 		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
183751ff1725SRam Amrani 		return -EINVAL;
183851ff1725SRam Amrani 	}
183951ff1725SRam Amrani 
184051ff1725SRam Amrani 	/* Calculate number of DPIs */
184151ff1725SRam Amrani 	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
184251ff1725SRam Amrani 	if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
184351ff1725SRam Amrani 	    ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
184451ff1725SRam Amrani 		/* Either EDPM is mandatory, or we are attempting to allocate a
184551ff1725SRam Amrani 		 * WID per CPU.
184651ff1725SRam Amrani 		 */
1847c2dedf87SRam Amrani 		n_cpus = num_present_cpus();
184851ff1725SRam Amrani 		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
184951ff1725SRam Amrani 	}
185051ff1725SRam Amrani 
185151ff1725SRam Amrani 	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
185251ff1725SRam Amrani 	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
185351ff1725SRam Amrani 	if (cond || p_hwfn->dcbx_no_edpm) {
185451ff1725SRam Amrani 		/* Either EDPM is disabled from user configuration, or it is
185551ff1725SRam Amrani 		 * disabled via DCBx, or it is not mandatory and we failed to
185651ff1725SRam Amrani 		 * allocated a WID per CPU.
185751ff1725SRam Amrani 		 * allocate a WID per CPU.
185851ff1725SRam Amrani 		n_cpus = 1;
185951ff1725SRam Amrani 		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
186051ff1725SRam Amrani 
186151ff1725SRam Amrani 		if (cond)
186251ff1725SRam Amrani 			qed_rdma_dpm_bar(p_hwfn, p_ptt);
186351ff1725SRam Amrani 	}
186451ff1725SRam Amrani 
186520b1bd96SRam Amrani 	p_hwfn->wid_count = (u16) n_cpus;
186620b1bd96SRam Amrani 
186751ff1725SRam Amrani 	DP_INFO(p_hwfn,
1868a1b469b8SAriel Elior 		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
186951ff1725SRam Amrani 		norm_regsize,
187051ff1725SRam Amrani 		pwm_regsize,
187151ff1725SRam Amrani 		p_hwfn->dpi_size,
187251ff1725SRam Amrani 		p_hwfn->dpi_count,
1873a1b469b8SAriel Elior 		(!qed_edpm_enabled(p_hwfn)) ?
1874a1b469b8SAriel Elior 		"disabled" : "enabled", PAGE_SIZE);
187551ff1725SRam Amrani 
187651ff1725SRam Amrani 	if (rc) {
187751ff1725SRam Amrani 		DP_ERR(p_hwfn,
187851ff1725SRam Amrani 		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
187951ff1725SRam Amrani 		       p_hwfn->dpi_count,
188051ff1725SRam Amrani 		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
188151ff1725SRam Amrani 		return -EINVAL;
188251ff1725SRam Amrani 	}
188351ff1725SRam Amrani 
188451ff1725SRam Amrani 	p_hwfn->dpi_start_offset = norm_regsize;
188551ff1725SRam Amrani 
188651ff1725SRam Amrani 	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
188751ff1725SRam Amrani 	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
188851ff1725SRam Amrani 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
188951ff1725SRam Amrani 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
189051ff1725SRam Amrani 
189151ff1725SRam Amrani 	return 0;
189251ff1725SRam Amrani }
189351ff1725SRam Amrani 
1894fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
18951a635e48SYuval Mintz 			    struct qed_ptt *p_ptt, int hw_mode)
1896fe56b9e6SYuval Mintz {
1897fc6575bcSMintz, Yuval 	int rc = 0;
1898fc6575bcSMintz, Yuval 
1899fc6575bcSMintz, Yuval 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
1900fc6575bcSMintz, Yuval 	if (rc)
1901fc6575bcSMintz, Yuval 		return rc;
1902fc6575bcSMintz, Yuval 
1903fc6575bcSMintz, Yuval 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
1904fc6575bcSMintz, Yuval 
1905fc6575bcSMintz, Yuval 	return 0;
1906fe56b9e6SYuval Mintz }
1907fe56b9e6SYuval Mintz 
1908fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
1909fe56b9e6SYuval Mintz 			  struct qed_ptt *p_ptt,
191019968430SChopra, Manish 			  struct qed_tunnel_info *p_tunn,
1911fe56b9e6SYuval Mintz 			  int hw_mode,
1912fe56b9e6SYuval Mintz 			  bool b_hw_start,
1913fe56b9e6SYuval Mintz 			  enum qed_int_mode int_mode,
1914fe56b9e6SYuval Mintz 			  bool allow_npar_tx_switch)
1915fe56b9e6SYuval Mintz {
1916fe56b9e6SYuval Mintz 	u8 rel_pf_id = p_hwfn->rel_pf_id;
1917fe56b9e6SYuval Mintz 	int rc = 0;
1918fe56b9e6SYuval Mintz 
1919fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
1920fe56b9e6SYuval Mintz 		struct qed_mcp_function_info *p_info;
1921fe56b9e6SYuval Mintz 
1922fe56b9e6SYuval Mintz 		p_info = &p_hwfn->mcp_info->func_info;
1923fe56b9e6SYuval Mintz 		if (p_info->bandwidth_min)
1924fe56b9e6SYuval Mintz 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
1925fe56b9e6SYuval Mintz 
1926fe56b9e6SYuval Mintz 		/* Update rate limit once we actually have a link */
19274b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
1928fe56b9e6SYuval Mintz 	}
1929fe56b9e6SYuval Mintz 
193015582962SRahul Verma 	qed_cxt_hw_init_pf(p_hwfn, p_ptt);
1931fe56b9e6SYuval Mintz 
1932fe56b9e6SYuval Mintz 	qed_int_igu_init_rt(p_hwfn);
1933fe56b9e6SYuval Mintz 
1934fe56b9e6SYuval Mintz 	/* Set VLAN in NIG if needed */
19351a635e48SYuval Mintz 	if (hw_mode & BIT(MODE_MF_SD)) {
1936fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
1937fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
1938fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
1939fe56b9e6SYuval Mintz 			     p_hwfn->hw_info.ovlan);
1940cac6f691SSudarsana Reddy Kalluru 
1941cac6f691SSudarsana Reddy Kalluru 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
1942cac6f691SSudarsana Reddy Kalluru 			   "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
1943cac6f691SSudarsana Reddy Kalluru 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
1944cac6f691SSudarsana Reddy Kalluru 			     1);
1945fe56b9e6SYuval Mintz 	}
1946fe56b9e6SYuval Mintz 
1947fe56b9e6SYuval Mintz 	/* Enable classification by MAC if needed */
19481a635e48SYuval Mintz 	if (hw_mode & BIT(MODE_MF_SI)) {
1949fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
1950fe56b9e6SYuval Mintz 			   "Configuring TAGMAC_CLS_TYPE\n");
1951fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn,
1952fe56b9e6SYuval Mintz 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
1953fe56b9e6SYuval Mintz 	}
1954fe56b9e6SYuval Mintz 
1955a2e7699eSTomer Tayar 	/* Protocol Configuration */
1956dbb799c3SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
1957dbb799c3SYuval Mintz 		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
19581e128c81SArun Easi 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
19591e128c81SArun Easi 		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
1960fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
1961fe56b9e6SYuval Mintz 
1962da090917STomer Tayar 	/* Sanity check before the PF init sequence that uses DMAE */
1963da090917STomer Tayar 	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
1964da090917STomer Tayar 	if (rc)
1965da090917STomer Tayar 		return rc;
1966da090917STomer Tayar 
1967fe56b9e6SYuval Mintz 	/* PF Init sequence */
1968fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
1969fe56b9e6SYuval Mintz 	if (rc)
1970fe56b9e6SYuval Mintz 		return rc;
1971fe56b9e6SYuval Mintz 
1972fe56b9e6SYuval Mintz 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
1973fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
1974fe56b9e6SYuval Mintz 	if (rc)
1975fe56b9e6SYuval Mintz 		return rc;
1976fe56b9e6SYuval Mintz 
1977fe56b9e6SYuval Mintz 	/* Pure runtime initializations - directly to the HW  */
1978fe56b9e6SYuval Mintz 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
1979fe56b9e6SYuval Mintz 
198051ff1725SRam Amrani 	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
198151ff1725SRam Amrani 	if (rc)
198251ff1725SRam Amrani 		return rc;
198351ff1725SRam Amrani 
1984fe56b9e6SYuval Mintz 	if (b_hw_start) {
1985fe56b9e6SYuval Mintz 		/* enable interrupts */
1986fe56b9e6SYuval Mintz 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
1987fe56b9e6SYuval Mintz 
1988fe56b9e6SYuval Mintz 		/* send function start command */
19894f64675fSManish Chopra 		rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
1990831bfb0eSYuval Mintz 				     allow_npar_tx_switch);
19911e128c81SArun Easi 		if (rc) {
1992fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
19931e128c81SArun Easi 			return rc;
19941e128c81SArun Easi 		}
19951e128c81SArun Easi 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
19961e128c81SArun Easi 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
19971e128c81SArun Easi 			qed_wr(p_hwfn, p_ptt,
19981e128c81SArun Easi 			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
19991e128c81SArun Easi 			       0x100);
20001e128c81SArun Easi 		}
2001fe56b9e6SYuval Mintz 	}
2002fe56b9e6SYuval Mintz 	return rc;
2003fe56b9e6SYuval Mintz }
2004fe56b9e6SYuval Mintz 
2005666db486STomer Tayar int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
2006666db486STomer Tayar 			       struct qed_ptt *p_ptt, bool b_enable)
2007fe56b9e6SYuval Mintz {
2008666db486STomer Tayar 	u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
2009fe56b9e6SYuval Mintz 
2010666db486STomer Tayar 	/* Configure the PF's internal FID_enable for master transactions */
2011666db486STomer Tayar 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
2012fe56b9e6SYuval Mintz 
2013666db486STomer Tayar 	/* Wait until value is set - try for 1 second every 50us */
2014fe56b9e6SYuval Mintz 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
2015fe56b9e6SYuval Mintz 		val = qed_rd(p_hwfn, p_ptt,
2016fe56b9e6SYuval Mintz 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
2017fe56b9e6SYuval Mintz 		if (val == set_val)
2018fe56b9e6SYuval Mintz 			break;
2019fe56b9e6SYuval Mintz 
2020fe56b9e6SYuval Mintz 		usleep_range(50, 60);
2021fe56b9e6SYuval Mintz 	}
2022fe56b9e6SYuval Mintz 
2023fe56b9e6SYuval Mintz 	if (val != set_val) {
2024fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
2025fe56b9e6SYuval Mintz 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
2026fe56b9e6SYuval Mintz 		return -EAGAIN;
2027fe56b9e6SYuval Mintz 	}
2028fe56b9e6SYuval Mintz 
2029fe56b9e6SYuval Mintz 	return 0;
2030fe56b9e6SYuval Mintz }
2031fe56b9e6SYuval Mintz 
2032fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
2033fe56b9e6SYuval Mintz 				struct qed_ptt *p_main_ptt)
2034fe56b9e6SYuval Mintz {
2035fe56b9e6SYuval Mintz 	/* Read shadow of current MFW mailbox */
2036fe56b9e6SYuval Mintz 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
2037fe56b9e6SYuval Mintz 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
20381a635e48SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
2039fe56b9e6SYuval Mintz }
2040fe56b9e6SYuval Mintz 
20415d24bcf1STomer Tayar static void
20425d24bcf1STomer Tayar qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
20435d24bcf1STomer Tayar 			 struct qed_drv_load_params *p_drv_load)
20445d24bcf1STomer Tayar {
20455d24bcf1STomer Tayar 	memset(p_load_req, 0, sizeof(*p_load_req));
20465d24bcf1STomer Tayar 
20475d24bcf1STomer Tayar 	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
20485d24bcf1STomer Tayar 			       QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
20495d24bcf1STomer Tayar 	p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
20505d24bcf1STomer Tayar 	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
20515d24bcf1STomer Tayar 	p_load_req->override_force_load = p_drv_load->override_force_load;
20525d24bcf1STomer Tayar }
20535d24bcf1STomer Tayar 
2054eaf3c0c6SChopra, Manish static int qed_vf_start(struct qed_hwfn *p_hwfn,
2055eaf3c0c6SChopra, Manish 			struct qed_hw_init_params *p_params)
2056eaf3c0c6SChopra, Manish {
2057eaf3c0c6SChopra, Manish 	if (p_params->p_tunn) {
2058eaf3c0c6SChopra, Manish 		qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
2059eaf3c0c6SChopra, Manish 		qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
2060eaf3c0c6SChopra, Manish 	}
2061eaf3c0c6SChopra, Manish 
2062c7281d59SGustavo A. R. Silva 	p_hwfn->b_int_enabled = true;
2063eaf3c0c6SChopra, Manish 
2064eaf3c0c6SChopra, Manish 	return 0;
2065eaf3c0c6SChopra, Manish }
2066eaf3c0c6SChopra, Manish 
2067666db486STomer Tayar static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2068666db486STomer Tayar {
2069666db486STomer Tayar 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
2070666db486STomer Tayar 	       BIT(p_hwfn->abs_pf_id));
2071666db486STomer Tayar }
2072666db486STomer Tayar 
2073c0c2d0b4SMintz, Yuval int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
2074fe56b9e6SYuval Mintz {
20755d24bcf1STomer Tayar 	struct qed_load_req_params load_req_params;
207650fdf601SSudarsana Reddy Kalluru 	u32 load_code, resp, param, drv_mb_param;
20770fefbfbaSSudarsana Kalluru 	bool b_default_mtu = true;
20780fefbfbaSSudarsana Kalluru 	struct qed_hwfn *p_hwfn;
2079666db486STomer Tayar 	int rc = 0, i;
2080cac6f691SSudarsana Reddy Kalluru 	u16 ether_type;
2081fe56b9e6SYuval Mintz 
2082c0c2d0b4SMintz, Yuval 	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
2083bb13ace7SSudarsana Reddy Kalluru 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
2084bb13ace7SSudarsana Reddy Kalluru 		return -EINVAL;
2085bb13ace7SSudarsana Reddy Kalluru 	}
2086bb13ace7SSudarsana Reddy Kalluru 
20871408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
2088c0c2d0b4SMintz, Yuval 		rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
20891a635e48SYuval Mintz 		if (rc)
2090fe56b9e6SYuval Mintz 			return rc;
20911408cc1fSYuval Mintz 	}
2092fe56b9e6SYuval Mintz 
2093fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
2094666db486STomer Tayar 		p_hwfn = &cdev->hwfns[i];
2095fe56b9e6SYuval Mintz 
20960fefbfbaSSudarsana Kalluru 		/* If management didn't provide a default, set one of our own */
20970fefbfbaSSudarsana Kalluru 		if (!p_hwfn->hw_info.mtu) {
20980fefbfbaSSudarsana Kalluru 			p_hwfn->hw_info.mtu = 1500;
20990fefbfbaSSudarsana Kalluru 			b_default_mtu = false;
21000fefbfbaSSudarsana Kalluru 		}
21010fefbfbaSSudarsana Kalluru 
21021408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
2103eaf3c0c6SChopra, Manish 			qed_vf_start(p_hwfn, p_params);
21041408cc1fSYuval Mintz 			continue;
21051408cc1fSYuval Mintz 		}
21061408cc1fSYuval Mintz 
21079c79ddaaSMintz, Yuval 		rc = qed_calc_hw_mode(p_hwfn);
21089c79ddaaSMintz, Yuval 		if (rc)
21099c79ddaaSMintz, Yuval 			return rc;
2110fe56b9e6SYuval Mintz 
2111cac6f691SSudarsana Reddy Kalluru 		if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
2112cac6f691SSudarsana Reddy Kalluru 					     &cdev->mf_bits) ||
2113cac6f691SSudarsana Reddy Kalluru 				    test_bit(QED_MF_8021AD_TAGGING,
2114cac6f691SSudarsana Reddy Kalluru 					     &cdev->mf_bits))) {
2115cac6f691SSudarsana Reddy Kalluru 			if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
2116cac6f691SSudarsana Reddy Kalluru 				ether_type = ETH_P_8021Q;
2117cac6f691SSudarsana Reddy Kalluru 			else
2118cac6f691SSudarsana Reddy Kalluru 				ether_type = ETH_P_8021AD;
2119b51bdfb9SSudarsana Reddy Kalluru 			STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
2120cac6f691SSudarsana Reddy Kalluru 				     ether_type);
2121b51bdfb9SSudarsana Reddy Kalluru 			STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
2122cac6f691SSudarsana Reddy Kalluru 				     ether_type);
2123b51bdfb9SSudarsana Reddy Kalluru 			STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
2124cac6f691SSudarsana Reddy Kalluru 				     ether_type);
2125b51bdfb9SSudarsana Reddy Kalluru 			STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
2126cac6f691SSudarsana Reddy Kalluru 				     ether_type);
2127b51bdfb9SSudarsana Reddy Kalluru 		}
2128b51bdfb9SSudarsana Reddy Kalluru 
21295d24bcf1STomer Tayar 		qed_fill_load_req_params(&load_req_params,
21305d24bcf1STomer Tayar 					 p_params->p_drv_load_params);
21315d24bcf1STomer Tayar 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
21325d24bcf1STomer Tayar 				      &load_req_params);
2133fe56b9e6SYuval Mintz 		if (rc) {
21345d24bcf1STomer Tayar 			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
2135fe56b9e6SYuval Mintz 			return rc;
2136fe56b9e6SYuval Mintz 		}
2137fe56b9e6SYuval Mintz 
21385d24bcf1STomer Tayar 		load_code = load_req_params.load_code;
2139fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
21405d24bcf1STomer Tayar 			   "Load request was sent. Load code: 0x%x\n",
21415d24bcf1STomer Tayar 			   load_code);
21425d24bcf1STomer Tayar 
214364515dc8STomer Tayar 		/* Only relevant for recovery:
214464515dc8STomer Tayar 		 * Clear the indication after the MFW has responded to the LOAD_REQ.
214564515dc8STomer Tayar 		 */
214664515dc8STomer Tayar 		cdev->recov_in_prog = false;
214764515dc8STomer Tayar 
2148645874e5SSudarsana Reddy Kalluru 		qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
2149645874e5SSudarsana Reddy Kalluru 
21505d24bcf1STomer Tayar 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
2151fe56b9e6SYuval Mintz 
2152666db486STomer Tayar 		/* Clean up the chip from a previous driver if such remnants exist.
2153666db486STomer Tayar 		 * This is not needed when the PF is the first one on the
2154666db486STomer Tayar 		 * engine, since afterwards we are going to init the FW.
2155666db486STomer Tayar 		 */
2156666db486STomer Tayar 		if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
2157666db486STomer Tayar 			rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
2158666db486STomer Tayar 					       p_hwfn->rel_pf_id, false);
2159666db486STomer Tayar 			if (rc) {
2160666db486STomer Tayar 				DP_NOTICE(p_hwfn, "Final cleanup failed\n");
2161666db486STomer Tayar 				goto load_err;
2162666db486STomer Tayar 			}
2163666db486STomer Tayar 		}
2164666db486STomer Tayar 
2165666db486STomer Tayar 		/* Log and clear previous pglue_b errors if such exist */
2166666db486STomer Tayar 		qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
2167666db486STomer Tayar 
2168666db486STomer Tayar 		/* Enable the PF's internal FID_enable in the PXP */
2169666db486STomer Tayar 		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
2170666db486STomer Tayar 						true);
2171666db486STomer Tayar 		if (rc)
2172666db486STomer Tayar 			goto load_err;
2173666db486STomer Tayar 
2174666db486STomer Tayar 		/* Clear the pglue_b was_error indication.
2175666db486STomer Tayar 		 * In E4 it must be done after the BME and the internal
2176666db486STomer Tayar 		 * FID_enable for the PF are set, since VDMs may cause the
2177666db486STomer Tayar 		 * indication to be set again.
2178666db486STomer Tayar 		 */
2179666db486STomer Tayar 		qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
2180fe56b9e6SYuval Mintz 
2181fe56b9e6SYuval Mintz 		switch (load_code) {
2182fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
2183fe56b9e6SYuval Mintz 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
2184fe56b9e6SYuval Mintz 						p_hwfn->hw_info.hw_mode);
2185fe56b9e6SYuval Mintz 			if (rc)
2186fe56b9e6SYuval Mintz 				break;
218753a42286SGustavo A. R. Silva 		/* Fall through */
2188fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_PORT:
2189fe56b9e6SYuval Mintz 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
2190fe56b9e6SYuval Mintz 					      p_hwfn->hw_info.hw_mode);
2191fe56b9e6SYuval Mintz 			if (rc)
2192fe56b9e6SYuval Mintz 				break;
2193fe56b9e6SYuval Mintz 
219453a42286SGustavo A. R. Silva 		/* Fall through */
2195fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
2196fe56b9e6SYuval Mintz 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
2197c0c2d0b4SMintz, Yuval 					    p_params->p_tunn,
2198c0c2d0b4SMintz, Yuval 					    p_hwfn->hw_info.hw_mode,
2199c0c2d0b4SMintz, Yuval 					    p_params->b_hw_start,
2200c0c2d0b4SMintz, Yuval 					    p_params->int_mode,
2201c0c2d0b4SMintz, Yuval 					    p_params->allow_npar_tx_switch);
2202fe56b9e6SYuval Mintz 			break;
2203fe56b9e6SYuval Mintz 		default:
2204c0c2d0b4SMintz, Yuval 			DP_NOTICE(p_hwfn,
2205c0c2d0b4SMintz, Yuval 				  "Unexpected load code [0x%08x]", load_code);
2206fe56b9e6SYuval Mintz 			rc = -EINVAL;
2207fe56b9e6SYuval Mintz 			break;
2208fe56b9e6SYuval Mintz 		}
2209fe56b9e6SYuval Mintz 
2210666db486STomer Tayar 		if (rc) {
2211fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
2212fe56b9e6SYuval Mintz 				  "init phase failed for loadcode 0x%x (rc %d)\n",
2213fe56b9e6SYuval Mintz 				  load_code, rc);
2214666db486STomer Tayar 			goto load_err;
2215fe56b9e6SYuval Mintz 		}
2216fe56b9e6SYuval Mintz 
2217666db486STomer Tayar 		rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
2218666db486STomer Tayar 		if (rc)
2219666db486STomer Tayar 			return rc;
2220fc561c8bSTomer Tayar 
222139651abdSSudarsana Reddy Kalluru 		/* send DCBX attention request command */
222239651abdSSudarsana Reddy Kalluru 		DP_VERBOSE(p_hwfn,
222339651abdSSudarsana Reddy Kalluru 			   QED_MSG_DCB,
222439651abdSSudarsana Reddy Kalluru 			   "sending phony dcbx set command to trigger DCBx attention handling\n");
2225666db486STomer Tayar 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
222639651abdSSudarsana Reddy Kalluru 				 DRV_MSG_CODE_SET_DCBX,
222739651abdSSudarsana Reddy Kalluru 				 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
2228666db486STomer Tayar 				 &resp, &param);
2229666db486STomer Tayar 		if (rc) {
223039651abdSSudarsana Reddy Kalluru 			DP_NOTICE(p_hwfn,
223139651abdSSudarsana Reddy Kalluru 				  "Failed to send DCBX attention request\n");
2232666db486STomer Tayar 			return rc;
223339651abdSSudarsana Reddy Kalluru 		}
223439651abdSSudarsana Reddy Kalluru 
2235fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = true;
2236fe56b9e6SYuval Mintz 	}
2237fe56b9e6SYuval Mintz 
22380fefbfbaSSudarsana Kalluru 	if (IS_PF(cdev)) {
22390fefbfbaSSudarsana Kalluru 		p_hwfn = QED_LEADING_HWFN(cdev);
224050fdf601SSudarsana Reddy Kalluru 
224150fdf601SSudarsana Reddy Kalluru 		/* Get pre-negotiated values for stag, bandwidth etc. */
224250fdf601SSudarsana Reddy Kalluru 		DP_VERBOSE(p_hwfn,
224350fdf601SSudarsana Reddy Kalluru 			   QED_MSG_SPQ,
224450fdf601SSudarsana Reddy Kalluru 			   "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
224550fdf601SSudarsana Reddy Kalluru 		drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
224650fdf601SSudarsana Reddy Kalluru 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
224750fdf601SSudarsana Reddy Kalluru 				 DRV_MSG_CODE_GET_OEM_UPDATES,
224850fdf601SSudarsana Reddy Kalluru 				 drv_mb_param, &resp, &param);
224950fdf601SSudarsana Reddy Kalluru 		if (rc)
225050fdf601SSudarsana Reddy Kalluru 			DP_NOTICE(p_hwfn,
225150fdf601SSudarsana Reddy Kalluru 				  "Failed to send GET_OEM_UPDATES attention request\n");
225250fdf601SSudarsana Reddy Kalluru 
22535d24bcf1STomer Tayar 		drv_mb_param = STORM_FW_VERSION;
22540fefbfbaSSudarsana Kalluru 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
22550fefbfbaSSudarsana Kalluru 				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
22560fefbfbaSSudarsana Kalluru 				 drv_mb_param, &load_code, &param);
22570fefbfbaSSudarsana Kalluru 		if (rc)
22580fefbfbaSSudarsana Kalluru 			DP_INFO(p_hwfn, "Failed to update firmware version\n");
22590fefbfbaSSudarsana Kalluru 
22600fefbfbaSSudarsana Kalluru 		if (!b_default_mtu) {
22610fefbfbaSSudarsana Kalluru 			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
22620fefbfbaSSudarsana Kalluru 						   p_hwfn->hw_info.mtu);
22630fefbfbaSSudarsana Kalluru 			if (rc)
22640fefbfbaSSudarsana Kalluru 				DP_INFO(p_hwfn,
22650fefbfbaSSudarsana Kalluru 					"Failed to update default mtu\n");
22660fefbfbaSSudarsana Kalluru 		}
22670fefbfbaSSudarsana Kalluru 
22680fefbfbaSSudarsana Kalluru 		rc = qed_mcp_ov_update_driver_state(p_hwfn,
22690fefbfbaSSudarsana Kalluru 						    p_hwfn->p_main_ptt,
22700fefbfbaSSudarsana Kalluru 						  QED_OV_DRIVER_STATE_DISABLED);
22710fefbfbaSSudarsana Kalluru 		if (rc)
22720fefbfbaSSudarsana Kalluru 			DP_INFO(p_hwfn, "Failed to update driver state\n");
22730fefbfbaSSudarsana Kalluru 
22740fefbfbaSSudarsana Kalluru 		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
2275538f8d00SSudarsana Reddy Kalluru 					       QED_OV_ESWITCH_NONE);
22760fefbfbaSSudarsana Kalluru 		if (rc)
22770fefbfbaSSudarsana Kalluru 			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
22780fefbfbaSSudarsana Kalluru 	}
22790fefbfbaSSudarsana Kalluru 
2280fe56b9e6SYuval Mintz 	return 0;
2281666db486STomer Tayar 
2282666db486STomer Tayar load_err:
2283666db486STomer Tayar 	/* The MFW load lock should be released also when initialization fails.
2284666db486STomer Tayar 	 */
2285666db486STomer Tayar 	qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
2286666db486STomer Tayar 	return rc;
2287fe56b9e6SYuval Mintz }
2288fe56b9e6SYuval Mintz 
2289fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
22901a635e48SYuval Mintz static void qed_hw_timers_stop(struct qed_dev *cdev,
22911a635e48SYuval Mintz 			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
22928c925c44SYuval Mintz {
22938c925c44SYuval Mintz 	int i;
22948c925c44SYuval Mintz 
22958c925c44SYuval Mintz 	/* close timers */
22968c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
22978c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
22988c925c44SYuval Mintz 
229964515dc8STomer Tayar 	if (cdev->recov_in_prog)
230064515dc8STomer Tayar 		return;
230164515dc8STomer Tayar 
23028c925c44SYuval Mintz 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
23038c925c44SYuval Mintz 		if ((!qed_rd(p_hwfn, p_ptt,
23048c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
23051a635e48SYuval Mintz 		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
23068c925c44SYuval Mintz 			break;
23078c925c44SYuval Mintz 
23088c925c44SYuval Mintz 		/* Depending on the number of connections/tasks, a sleep of
23098c925c44SYuval Mintz 		 * about 1ms may be required between polls.
23108c925c44SYuval Mintz 		 */
23118c925c44SYuval Mintz 		usleep_range(1000, 2000);
23128c925c44SYuval Mintz 	}
23138c925c44SYuval Mintz 
23148c925c44SYuval Mintz 	if (i < QED_HW_STOP_RETRY_LIMIT)
23158c925c44SYuval Mintz 		return;
23168c925c44SYuval Mintz 
23178c925c44SYuval Mintz 	DP_NOTICE(p_hwfn,
23188c925c44SYuval Mintz 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
23198c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
23208c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
23218c925c44SYuval Mintz }
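/* The helper above is a bounded register poll: once the per-PF timer
 * scans are disabled, TM_REG_PF_SCAN_ACTIVE_CONN/TASK are sampled up to
 * QED_HW_STOP_RETRY_LIMIT (10) times with a 1-2ms sleep in between, so
 * the worst-case wait is roughly 10-20ms before the "scans are not
 * over" notice is printed. A minimal sketch of the same poll pattern,
 * assuming a hypothetical scans_active() helper:
 *
 *	for (i = 0; i < limit; i++) {
 *		if (!scans_active(p_hwfn, p_ptt))
 *			return 0;
 *		usleep_range(1000, 2000);
 *	}
 *	return -ETIMEDOUT;
 */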
23228c925c44SYuval Mintz 
23238c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev)
23248c925c44SYuval Mintz {
23258c925c44SYuval Mintz 	int j;
23268c925c44SYuval Mintz 
23278c925c44SYuval Mintz 	for_each_hwfn(cdev, j) {
23288c925c44SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
23298c925c44SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
23308c925c44SYuval Mintz 
23318c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
23328c925c44SYuval Mintz 	}
23338c925c44SYuval Mintz }
23348c925c44SYuval Mintz 
2335fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev)
2336fe56b9e6SYuval Mintz {
23371226337aSTomer Tayar 	struct qed_hwfn *p_hwfn;
23381226337aSTomer Tayar 	struct qed_ptt *p_ptt;
23391226337aSTomer Tayar 	int rc, rc2 = 0;
23408c925c44SYuval Mintz 	int j;
2341fe56b9e6SYuval Mintz 
2342fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, j) {
23431226337aSTomer Tayar 		p_hwfn = &cdev->hwfns[j];
23441226337aSTomer Tayar 		p_ptt = p_hwfn->p_main_ptt;
2345fe56b9e6SYuval Mintz 
2346fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
2347fe56b9e6SYuval Mintz 
23481408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
23490b55e27dSYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
23501226337aSTomer Tayar 			rc = qed_vf_pf_reset(p_hwfn);
23511226337aSTomer Tayar 			if (rc) {
23521226337aSTomer Tayar 				DP_NOTICE(p_hwfn,
23531226337aSTomer Tayar 					  "qed_vf_pf_reset failed. rc = %d.\n",
23541226337aSTomer Tayar 					  rc);
23551226337aSTomer Tayar 				rc2 = -EINVAL;
23561226337aSTomer Tayar 			}
23571408cc1fSYuval Mintz 			continue;
23581408cc1fSYuval Mintz 		}
23591408cc1fSYuval Mintz 
2360fe56b9e6SYuval Mintz 		/* mark the hw as uninitialized... */
2361fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = false;
2362fe56b9e6SYuval Mintz 
23631226337aSTomer Tayar 		/* Send unload command to MCP */
236464515dc8STomer Tayar 		if (!cdev->recov_in_prog) {
23651226337aSTomer Tayar 			rc = qed_mcp_unload_req(p_hwfn, p_ptt);
23661226337aSTomer Tayar 			if (rc) {
23678c925c44SYuval Mintz 				DP_NOTICE(p_hwfn,
23681226337aSTomer Tayar 					  "Failed sending a UNLOAD_REQ command. rc = %d.\n",
23691226337aSTomer Tayar 					  rc);
23701226337aSTomer Tayar 				rc2 = -EINVAL;
23711226337aSTomer Tayar 			}
237264515dc8STomer Tayar 		}
23731226337aSTomer Tayar 
23741226337aSTomer Tayar 		qed_slowpath_irq_sync(p_hwfn);
23751226337aSTomer Tayar 
23761226337aSTomer Tayar 		/* After this point no MFW attentions are expected, which
23771226337aSTomer Tayar 		 * prevents e.g. a race between pf stop and dcbx pf update.
23781226337aSTomer Tayar 		 */
23791226337aSTomer Tayar 		rc = qed_sp_pf_stop(p_hwfn);
23801226337aSTomer Tayar 		if (rc) {
23811226337aSTomer Tayar 			DP_NOTICE(p_hwfn,
23821226337aSTomer Tayar 				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
23831226337aSTomer Tayar 				  rc);
23841226337aSTomer Tayar 			rc2 = -EINVAL;
23851226337aSTomer Tayar 		}
2386fe56b9e6SYuval Mintz 
2387fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt,
2388fe56b9e6SYuval Mintz 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
2389fe56b9e6SYuval Mintz 
2390fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
2391fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
2392fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
2393fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
2394fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
2395fe56b9e6SYuval Mintz 
23968c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
2397fe56b9e6SYuval Mintz 
2398fe56b9e6SYuval Mintz 		/* Disable Attention Generation */
2399fe56b9e6SYuval Mintz 		qed_int_igu_disable_int(p_hwfn, p_ptt);
2400fe56b9e6SYuval Mintz 
2401fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
2402fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
2403fe56b9e6SYuval Mintz 
2404fe56b9e6SYuval Mintz 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
2405fe56b9e6SYuval Mintz 
2406fe56b9e6SYuval Mintz 		/* Need to wait 1ms to guarantee SBs are cleared */
2407fe56b9e6SYuval Mintz 		usleep_range(1000, 2000);
24081226337aSTomer Tayar 
24091226337aSTomer Tayar 		/* Disable PF in HW blocks */
24101226337aSTomer Tayar 		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
24111226337aSTomer Tayar 		qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
24121226337aSTomer Tayar 
241364515dc8STomer Tayar 		if (!cdev->recov_in_prog) {
241464515dc8STomer Tayar 			rc = qed_mcp_unload_done(p_hwfn, p_ptt);
24151226337aSTomer Tayar 			if (rc) {
24161226337aSTomer Tayar 				DP_NOTICE(p_hwfn,
24171226337aSTomer Tayar 					  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
24181226337aSTomer Tayar 					  rc);
24191226337aSTomer Tayar 				rc2 = -EINVAL;
24201226337aSTomer Tayar 			}
2421fe56b9e6SYuval Mintz 		}
242264515dc8STomer Tayar 	}
2423fe56b9e6SYuval Mintz 
242464515dc8STomer Tayar 	if (IS_PF(cdev) && !cdev->recov_in_prog) {
24251226337aSTomer Tayar 		p_hwfn = QED_LEADING_HWFN(cdev);
24261226337aSTomer Tayar 		p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
24271226337aSTomer Tayar 
2428666db486STomer Tayar 		/* Clear the PF's internal FID_enable in the PXP.
2429666db486STomer Tayar 		 * In CMT this should only be done for the first hw-function, and
2430666db486STomer Tayar 		 * only after all transactions have stopped for all active
2431666db486STomer Tayar 		 * hw-functions.
2432fe56b9e6SYuval Mintz 		 */
2433666db486STomer Tayar 		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
24341226337aSTomer Tayar 		if (rc) {
24351226337aSTomer Tayar 			DP_NOTICE(p_hwfn,
2436666db486STomer Tayar 				  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2437666db486STomer Tayar 				  rc);
24381226337aSTomer Tayar 			rc2 = -EINVAL;
24391226337aSTomer Tayar 		}
24401408cc1fSYuval Mintz 	}
2441fe56b9e6SYuval Mintz 
24421226337aSTomer Tayar 	return rc2;
2443fe56b9e6SYuval Mintz }
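/* qed_hw_stop() above is the unload mirror of the init flow: outside of
 * a recovery flow it brackets the teardown with mailbox commands,
 * roughly
 *
 *	UNLOAD_REQ -> qed_sp_pf_stop() -> close NIG/PRS/timers/IGU
 *		   -> disable DORQ/QM for the PF -> UNLOAD_DONE
 *
 * and only the leading hw-function finally clears the PXP FID_enable.
 * Intermediate failures are recorded in rc2 so the teardown keeps going
 * while the caller still sees an error code.
 */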
2444fe56b9e6SYuval Mintz 
244515582962SRahul Verma int qed_hw_stop_fastpath(struct qed_dev *cdev)
2446cee4d264SManish Chopra {
24478c925c44SYuval Mintz 	int j;
2448cee4d264SManish Chopra 
2449cee4d264SManish Chopra 	for_each_hwfn(cdev, j) {
2450cee4d264SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
245115582962SRahul Verma 		struct qed_ptt *p_ptt;
2452cee4d264SManish Chopra 
2453dacd88d6SYuval Mintz 		if (IS_VF(cdev)) {
2454dacd88d6SYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
2455dacd88d6SYuval Mintz 			continue;
2456dacd88d6SYuval Mintz 		}
245715582962SRahul Verma 		p_ptt = qed_ptt_acquire(p_hwfn);
245815582962SRahul Verma 		if (!p_ptt)
245915582962SRahul Verma 			return -EAGAIN;
2460dacd88d6SYuval Mintz 
2461cee4d264SManish Chopra 		DP_VERBOSE(p_hwfn,
24621a635e48SYuval Mintz 			   NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
2463cee4d264SManish Chopra 
2464cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt,
2465cee4d264SManish Chopra 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
2466cee4d264SManish Chopra 
2467cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
2468cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
2469cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
2470cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
2471cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
2472cee4d264SManish Chopra 
2473cee4d264SManish Chopra 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
2474cee4d264SManish Chopra 
2475cee4d264SManish Chopra 		/* Need to wait 1ms to guarantee SBs are cleared */
2476cee4d264SManish Chopra 		usleep_range(1000, 2000);
247715582962SRahul Verma 		qed_ptt_release(p_hwfn, p_ptt);
2478cee4d264SManish Chopra 	}
2479cee4d264SManish Chopra 
248015582962SRahul Verma 	return 0;
248115582962SRahul Verma }
248215582962SRahul Verma 
248315582962SRahul Verma int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
2484cee4d264SManish Chopra {
248515582962SRahul Verma 	struct qed_ptt *p_ptt;
248615582962SRahul Verma 
2487dacd88d6SYuval Mintz 	if (IS_VF(p_hwfn->cdev))
248815582962SRahul Verma 		return 0;
248915582962SRahul Verma 
249015582962SRahul Verma 	p_ptt = qed_ptt_acquire(p_hwfn);
249115582962SRahul Verma 	if (!p_ptt)
249215582962SRahul Verma 		return -EAGAIN;
2493dacd88d6SYuval Mintz 
2494f855df22SMichal Kalderon 	if (p_hwfn->p_rdma_info &&
2495291d57f6SMichal Kalderon 	    p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
2496f855df22SMichal Kalderon 		qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
2497f855df22SMichal Kalderon 
2498cee4d264SManish Chopra 	/* Re-open incoming traffic */
249915582962SRahul Verma 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
250015582962SRahul Verma 	qed_ptt_release(p_hwfn, p_ptt);
250115582962SRahul Verma 
250215582962SRahul Verma 	return 0;
2503cee4d264SManish Chopra }
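/* Both fastpath helpers above follow the driver's PTT window pattern:
 * acquire a PTT, issue a handful of qed_wr()/qed_rd() accesses, then
 * release it. A minimal sketch of that pattern, with do_work() standing
 * in for the register accesses (hypothetical helper, for illustration
 * only):
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	do_work(p_hwfn, p_ptt);
 *	qed_ptt_release(p_hwfn, p_ptt);
 *	return 0;
 */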
2504cee4d264SManish Chopra 
2505fe56b9e6SYuval Mintz /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
2506fe56b9e6SYuval Mintz static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
2507fe56b9e6SYuval Mintz {
2508fe56b9e6SYuval Mintz 	qed_ptt_pool_free(p_hwfn);
2509fe56b9e6SYuval Mintz 	kfree(p_hwfn->hw_info.p_igu_info);
25103587cb87STomer Tayar 	p_hwfn->hw_info.p_igu_info = NULL;
2511fe56b9e6SYuval Mintz }
2512fe56b9e6SYuval Mintz 
2513fe56b9e6SYuval Mintz /* Setup bar access */
251412e09c69SYuval Mintz static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
2515fe56b9e6SYuval Mintz {
2516fe56b9e6SYuval Mintz 	/* clear indirect access */
25179c79ddaaSMintz, Yuval 	if (QED_IS_AH(p_hwfn->cdev)) {
25189c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25199c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
25209c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25219c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
25229c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25239c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
25249c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25259c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
25269c79ddaaSMintz, Yuval 	} else {
25279c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25289c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
25299c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25309c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
25319c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25329c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
25339c79ddaaSMintz, Yuval 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
25349c79ddaaSMintz, Yuval 		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
25359c79ddaaSMintz, Yuval 	}
2536fe56b9e6SYuval Mintz 
2537666db486STomer Tayar 	/* Clean up previous pglue_b errors, if any exist */
2538666db486STomer Tayar 	qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
2539fe56b9e6SYuval Mintz 
2540fe56b9e6SYuval Mintz 	/* enable internal target-read */
2541fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
2542fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
2543fe56b9e6SYuval Mintz }
2544fe56b9e6SYuval Mintz 
2545fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
2546fe56b9e6SYuval Mintz {
2547fe56b9e6SYuval Mintz 	/* ME Register */
25481a635e48SYuval Mintz 	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
25491a635e48SYuval Mintz 						  PXP_PF_ME_OPAQUE_ADDR);
2550fe56b9e6SYuval Mintz 
2551fe56b9e6SYuval Mintz 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
2552fe56b9e6SYuval Mintz 
2553fe56b9e6SYuval Mintz 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
2554fe56b9e6SYuval Mintz 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
2555fe56b9e6SYuval Mintz 				      PXP_CONCRETE_FID_PFID);
2556fe56b9e6SYuval Mintz 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
2557fe56b9e6SYuval Mintz 				    PXP_CONCRETE_FID_PORT);
2558525ef5c0SYuval Mintz 
2559525ef5c0SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
2560525ef5c0SYuval Mintz 		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
2561525ef5c0SYuval Mintz 		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
2562fe56b9e6SYuval Mintz }
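/* A worked example of the field extraction above, assuming a
 * hypothetical concrete_fid value of 0x000b0305: the absolute PF id is
 * taken from bits [19:16],
 *
 *	abs_pf_id = (0x000b0305 >> 16) & 0xf;	 -> 0xb
 *
 * while rel_pf_id and port_id are pulled from the PFID/PORT fields of
 * the same register via GET_FIELD().
 */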
2563fe56b9e6SYuval Mintz 
256425c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
256525c089d7SYuval Mintz {
256625c089d7SYuval Mintz 	u32 *feat_num = p_hwfn->hw_info.feat_num;
2567ebbdcc66SMintz, Yuval 	struct qed_sb_cnt_info sb_cnt;
2568810bb1f0SMintz, Yuval 	u32 non_l2_sbs = 0;
256925c089d7SYuval Mintz 
2570ebbdcc66SMintz, Yuval 	memset(&sb_cnt, 0, sizeof(sb_cnt));
2571ebbdcc66SMintz, Yuval 	qed_int_get_num_sbs(p_hwfn, &sb_cnt);
2572ebbdcc66SMintz, Yuval 
25730189efb8SYuval Mintz 	if (IS_ENABLED(CONFIG_QED_RDMA) &&
2574c851a9dcSKalderon, Michal 	    QED_IS_RDMA_PERSONALITY(p_hwfn)) {
25750189efb8SYuval Mintz 		/* Each RoCE CNQ requires 1 status block + 1 CNQ. We divide
25760189efb8SYuval Mintz 		 * the status blocks equally between L2 / RoCE, but take into
25770189efb8SYuval Mintz 		 * account how many L2 queues / CNQs we actually have.
257851ff1725SRam Amrani 		 */
257951ff1725SRam Amrani 		feat_num[QED_RDMA_CNQ] =
2580ebbdcc66SMintz, Yuval 			min_t(u32, sb_cnt.cnt / 2,
258151ff1725SRam Amrani 			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
2582810bb1f0SMintz, Yuval 
2583810bb1f0SMintz, Yuval 		non_l2_sbs = feat_num[QED_RDMA_CNQ];
258451ff1725SRam Amrani 	}
2585c851a9dcSKalderon, Michal 	if (QED_IS_L2_PERSONALITY(p_hwfn)) {
2586dec26533SMintz, Yuval 		/* Start by allocating VF queues, then PF's */
2587dec26533SMintz, Yuval 		feat_num[QED_VF_L2_QUE] = min_t(u32,
2588dec26533SMintz, Yuval 						RESC_NUM(p_hwfn, QED_L2_QUEUE),
2589ebbdcc66SMintz, Yuval 						sb_cnt.iov_cnt);
2590810bb1f0SMintz, Yuval 		feat_num[QED_PF_L2_QUE] = min_t(u32,
2591ebbdcc66SMintz, Yuval 						sb_cnt.cnt - non_l2_sbs,
2592dec26533SMintz, Yuval 						RESC_NUM(p_hwfn,
2593dec26533SMintz, Yuval 							 QED_L2_QUEUE) -
2594dec26533SMintz, Yuval 						FEAT_NUM(p_hwfn,
2595dec26533SMintz, Yuval 							 QED_VF_L2_QUE));
2596dec26533SMintz, Yuval 	}
25975a1f965aSMintz, Yuval 
2598c851a9dcSKalderon, Michal 	if (QED_IS_FCOE_PERSONALITY(p_hwfn))
25993c5da942SMintz, Yuval 		feat_num[QED_FCOE_CQ] =  min_t(u32, sb_cnt.cnt,
26003c5da942SMintz, Yuval 					       RESC_NUM(p_hwfn,
26013c5da942SMintz, Yuval 							QED_CMDQS_CQS));
26023c5da942SMintz, Yuval 
2603c851a9dcSKalderon, Michal 	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
2604ebbdcc66SMintz, Yuval 		feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
260508737a3fSMintz, Yuval 					       RESC_NUM(p_hwfn,
260608737a3fSMintz, Yuval 							QED_CMDQS_CQS));
26075a1f965aSMintz, Yuval 	DP_VERBOSE(p_hwfn,
26085a1f965aSMintz, Yuval 		   NETIF_MSG_PROBE,
26093c5da942SMintz, Yuval 		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
26105a1f965aSMintz, Yuval 		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
26115a1f965aSMintz, Yuval 		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
26125a1f965aSMintz, Yuval 		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
26133c5da942SMintz, Yuval 		   (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
261408737a3fSMintz, Yuval 		   (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
2615ebbdcc66SMintz, Yuval 		   (int)sb_cnt.cnt);
261625c089d7SYuval Mintz }
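/* A worked example of the partitioning above, assuming hypothetical
 * counts for a personality that takes both the RDMA and the L2 branch:
 * with sb_cnt.cnt = 64 status blocks, 16 CNQ RAM resources, 128 L2
 * queues and no VF status blocks,
 *
 *	feat[QED_RDMA_CNQ]  = min(64 / 2, 16)       = 16
 *	non_l2_sbs          = 16
 *	feat[QED_VF_L2_QUE] = min(128, 0)           = 0
 *	feat[QED_PF_L2_QUE] = min(64 - 16, 128 - 0) = 48
 *
 * i.e. the CNQs can never take more than half of the status blocks and
 * the PF L2 queues get whatever is left.
 */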
261725c089d7SYuval Mintz 
26189c8517c4STomer Tayar const char *qed_hw_get_resc_name(enum qed_resources res_id)
26192edbff8dSTomer Tayar {
26202edbff8dSTomer Tayar 	switch (res_id) {
26212edbff8dSTomer Tayar 	case QED_L2_QUEUE:
26222edbff8dSTomer Tayar 		return "L2_QUEUE";
26232edbff8dSTomer Tayar 	case QED_VPORT:
26242edbff8dSTomer Tayar 		return "VPORT";
26252edbff8dSTomer Tayar 	case QED_RSS_ENG:
26262edbff8dSTomer Tayar 		return "RSS_ENG";
26272edbff8dSTomer Tayar 	case QED_PQ:
26282edbff8dSTomer Tayar 		return "PQ";
26292edbff8dSTomer Tayar 	case QED_RL:
26302edbff8dSTomer Tayar 		return "RL";
26312edbff8dSTomer Tayar 	case QED_MAC:
26322edbff8dSTomer Tayar 		return "MAC";
26332edbff8dSTomer Tayar 	case QED_VLAN:
26342edbff8dSTomer Tayar 		return "VLAN";
26352edbff8dSTomer Tayar 	case QED_RDMA_CNQ_RAM:
26362edbff8dSTomer Tayar 		return "RDMA_CNQ_RAM";
26372edbff8dSTomer Tayar 	case QED_ILT:
26382edbff8dSTomer Tayar 		return "ILT";
26392edbff8dSTomer Tayar 	case QED_LL2_QUEUE:
26402edbff8dSTomer Tayar 		return "LL2_QUEUE";
26412edbff8dSTomer Tayar 	case QED_CMDQS_CQS:
26422edbff8dSTomer Tayar 		return "CMDQS_CQS";
26432edbff8dSTomer Tayar 	case QED_RDMA_STATS_QUEUE:
26442edbff8dSTomer Tayar 		return "RDMA_STATS_QUEUE";
26459c8517c4STomer Tayar 	case QED_BDQ:
26469c8517c4STomer Tayar 		return "BDQ";
26479c8517c4STomer Tayar 	case QED_SB:
26489c8517c4STomer Tayar 		return "SB";
26492edbff8dSTomer Tayar 	default:
26502edbff8dSTomer Tayar 		return "UNKNOWN_RESOURCE";
26512edbff8dSTomer Tayar 	}
26522edbff8dSTomer Tayar }
26532edbff8dSTomer Tayar 
26549c8517c4STomer Tayar static int
26559c8517c4STomer Tayar __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
26569c8517c4STomer Tayar 			    struct qed_ptt *p_ptt,
26579c8517c4STomer Tayar 			    enum qed_resources res_id,
26589c8517c4STomer Tayar 			    u32 resc_max_val, u32 *p_mcp_resp)
26599c8517c4STomer Tayar {
26609c8517c4STomer Tayar 	int rc;
26619c8517c4STomer Tayar 
26629c8517c4STomer Tayar 	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
26639c8517c4STomer Tayar 				      resc_max_val, p_mcp_resp);
26649c8517c4STomer Tayar 	if (rc) {
26659c8517c4STomer Tayar 		DP_NOTICE(p_hwfn,
26669c8517c4STomer Tayar 			  "MFW response failure for a max value setting of resource %d [%s]\n",
26679c8517c4STomer Tayar 			  res_id, qed_hw_get_resc_name(res_id));
26689c8517c4STomer Tayar 		return rc;
26699c8517c4STomer Tayar 	}
26709c8517c4STomer Tayar 
26719c8517c4STomer Tayar 	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
26729c8517c4STomer Tayar 		DP_INFO(p_hwfn,
26739c8517c4STomer Tayar 			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
26749c8517c4STomer Tayar 			res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
26759c8517c4STomer Tayar 
26769c8517c4STomer Tayar 	return 0;
26779c8517c4STomer Tayar }
26789c8517c4STomer Tayar 
26799c8517c4STomer Tayar static int
26809c8517c4STomer Tayar qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
26819c8517c4STomer Tayar {
26829c8517c4STomer Tayar 	bool b_ah = QED_IS_AH(p_hwfn->cdev);
26839c8517c4STomer Tayar 	u32 resc_max_val, mcp_resp;
26849c8517c4STomer Tayar 	u8 res_id;
26859c8517c4STomer Tayar 	int rc;
26869c8517c4STomer Tayar 
26879c8517c4STomer Tayar 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
26889c8517c4STomer Tayar 		switch (res_id) {
26899c8517c4STomer Tayar 		case QED_LL2_QUEUE:
26909c8517c4STomer Tayar 			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
26919c8517c4STomer Tayar 			break;
26929c8517c4STomer Tayar 		case QED_RDMA_CNQ_RAM:
26939c8517c4STomer Tayar 			/* No need for a case for QED_CMDQS_CQS since
26949c8517c4STomer Tayar 			 * CNQ/CMDQS are the same resource.
26959c8517c4STomer Tayar 			 */
2696da090917STomer Tayar 			resc_max_val = NUM_OF_GLOBAL_QUEUES;
26979c8517c4STomer Tayar 			break;
26989c8517c4STomer Tayar 		case QED_RDMA_STATS_QUEUE:
26999c8517c4STomer Tayar 			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
27009c8517c4STomer Tayar 			    : RDMA_NUM_STATISTIC_COUNTERS_BB;
27019c8517c4STomer Tayar 			break;
27029c8517c4STomer Tayar 		case QED_BDQ:
27039c8517c4STomer Tayar 			resc_max_val = BDQ_NUM_RESOURCES;
27049c8517c4STomer Tayar 			break;
27059c8517c4STomer Tayar 		default:
27069c8517c4STomer Tayar 			continue;
27079c8517c4STomer Tayar 		}
27089c8517c4STomer Tayar 
27099c8517c4STomer Tayar 		rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
27109c8517c4STomer Tayar 						 resc_max_val, &mcp_resp);
27119c8517c4STomer Tayar 		if (rc)
27129c8517c4STomer Tayar 			return rc;
27139c8517c4STomer Tayar 
27149c8517c4STomer Tayar 		/* There is no point in continuing to the next resource if the
27159c8517c4STomer Tayar 		 * command is not supported by the MFW.
27169c8517c4STomer Tayar 		 * We do continue if the command is supported but the resource
27179c8517c4STomer Tayar 		 * is unknown to the MFW. Such a resource will later be
27189c8517c4STomer Tayar 		 * configured with the default allocation values.
27199c8517c4STomer Tayar 		 */
27209c8517c4STomer Tayar 		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
27219c8517c4STomer Tayar 			return -EINVAL;
27229c8517c4STomer Tayar 	}
27239c8517c4STomer Tayar 
27249c8517c4STomer Tayar 	return 0;
27259c8517c4STomer Tayar }
27269c8517c4STomer Tayar 
27279c8517c4STomer Tayar static
27289c8517c4STomer Tayar int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
27299c8517c4STomer Tayar 			 enum qed_resources res_id,
27309c8517c4STomer Tayar 			 u32 *p_resc_num, u32 *p_resc_start)
27319c8517c4STomer Tayar {
27329c8517c4STomer Tayar 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
27339c8517c4STomer Tayar 	bool b_ah = QED_IS_AH(p_hwfn->cdev);
27349c8517c4STomer Tayar 
27359c8517c4STomer Tayar 	switch (res_id) {
27369c8517c4STomer Tayar 	case QED_L2_QUEUE:
27379c8517c4STomer Tayar 		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
27389c8517c4STomer Tayar 			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
27399c8517c4STomer Tayar 		break;
27409c8517c4STomer Tayar 	case QED_VPORT:
27419c8517c4STomer Tayar 		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
27429c8517c4STomer Tayar 			       MAX_NUM_VPORTS_BB) / num_funcs;
27439c8517c4STomer Tayar 		break;
27449c8517c4STomer Tayar 	case QED_RSS_ENG:
27459c8517c4STomer Tayar 		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
27469c8517c4STomer Tayar 			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
27479c8517c4STomer Tayar 		break;
27489c8517c4STomer Tayar 	case QED_PQ:
27499c8517c4STomer Tayar 		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
27509c8517c4STomer Tayar 			       MAX_QM_TX_QUEUES_BB) / num_funcs;
27519c8517c4STomer Tayar 		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
27529c8517c4STomer Tayar 		break;
27539c8517c4STomer Tayar 	case QED_RL:
27549c8517c4STomer Tayar 		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
27559c8517c4STomer Tayar 		break;
27569c8517c4STomer Tayar 	case QED_MAC:
27579c8517c4STomer Tayar 	case QED_VLAN:
27589c8517c4STomer Tayar 		/* Each VFC resource can accommodate both a MAC and a VLAN */
27599c8517c4STomer Tayar 		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
27609c8517c4STomer Tayar 		break;
27619c8517c4STomer Tayar 	case QED_ILT:
27629c8517c4STomer Tayar 		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
27639c8517c4STomer Tayar 			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
27649c8517c4STomer Tayar 		break;
27659c8517c4STomer Tayar 	case QED_LL2_QUEUE:
27669c8517c4STomer Tayar 		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
27679c8517c4STomer Tayar 		break;
27689c8517c4STomer Tayar 	case QED_RDMA_CNQ_RAM:
27699c8517c4STomer Tayar 	case QED_CMDQS_CQS:
27709c8517c4STomer Tayar 		/* CNQ/CMDQS are the same resource */
2771da090917STomer Tayar 		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
27729c8517c4STomer Tayar 		break;
27739c8517c4STomer Tayar 	case QED_RDMA_STATS_QUEUE:
27749c8517c4STomer Tayar 		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
27759c8517c4STomer Tayar 			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
27769c8517c4STomer Tayar 		break;
27779c8517c4STomer Tayar 	case QED_BDQ:
27789c8517c4STomer Tayar 		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
27799c8517c4STomer Tayar 		    p_hwfn->hw_info.personality != QED_PCI_FCOE)
27809c8517c4STomer Tayar 			*p_resc_num = 0;
27819c8517c4STomer Tayar 		else
27829c8517c4STomer Tayar 			*p_resc_num = 1;
27839c8517c4STomer Tayar 		break;
27849c8517c4STomer Tayar 	case QED_SB:
2785ebbdcc66SMintz, Yuval 		/* Since we want its value to reflect whether the MFW supports
2786ebbdcc66SMintz, Yuval 		 * the new scheme, use a default of 0.
2787ebbdcc66SMintz, Yuval 		 */
2788ebbdcc66SMintz, Yuval 		*p_resc_num = 0;
27899c8517c4STomer Tayar 		break;
27909c8517c4STomer Tayar 	default:
27919c8517c4STomer Tayar 		return -EINVAL;
27929c8517c4STomer Tayar 	}
27939c8517c4STomer Tayar 
27949c8517c4STomer Tayar 	switch (res_id) {
27959c8517c4STomer Tayar 	case QED_BDQ:
27969c8517c4STomer Tayar 		if (!*p_resc_num)
27979c8517c4STomer Tayar 			*p_resc_start = 0;
279878cea9ffSTomer Tayar 		else if (p_hwfn->cdev->num_ports_in_engine == 4)
27999c8517c4STomer Tayar 			*p_resc_start = p_hwfn->port_id;
28009c8517c4STomer Tayar 		else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
28019c8517c4STomer Tayar 			*p_resc_start = p_hwfn->port_id;
28029c8517c4STomer Tayar 		else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
28039c8517c4STomer Tayar 			*p_resc_start = p_hwfn->port_id + 2;
28049c8517c4STomer Tayar 		break;
28059c8517c4STomer Tayar 	default:
28069c8517c4STomer Tayar 		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
28079c8517c4STomer Tayar 		break;
28089c8517c4STomer Tayar 	}
28099c8517c4STomer Tayar 
28109c8517c4STomer Tayar 	return 0;
28119c8517c4STomer Tayar }
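/* The defaults above are an even split of each global resource among
 * the PFs on the engine, with this PF's slice starting at
 * resc_num * enabled_func_idx. A worked example, assuming for
 * illustration a global pool of 128 L2 queues and 4 enabled PFs:
 *
 *	*p_resc_num   = 128 / 4 = 32
 *	*p_resc_start = 32 * enabled_func_idx	 -> 0, 32, 64 or 96
 *
 * PQs are additionally rounded down to a multiple of 8, and the BDQ
 * start is keyed off the port id instead.
 */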
28129c8517c4STomer Tayar 
28139c8517c4STomer Tayar static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
28142edbff8dSTomer Tayar 				  enum qed_resources res_id)
28152edbff8dSTomer Tayar {
28169c8517c4STomer Tayar 	u32 dflt_resc_num = 0, dflt_resc_start = 0;
28179c8517c4STomer Tayar 	u32 mcp_resp, *p_resc_num, *p_resc_start;
28182edbff8dSTomer Tayar 	int rc;
28192edbff8dSTomer Tayar 
28202edbff8dSTomer Tayar 	p_resc_num = &RESC_NUM(p_hwfn, res_id);
28212edbff8dSTomer Tayar 	p_resc_start = &RESC_START(p_hwfn, res_id);
28222edbff8dSTomer Tayar 
28239c8517c4STomer Tayar 	rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
28249c8517c4STomer Tayar 				  &dflt_resc_start);
28259c8517c4STomer Tayar 	if (rc) {
28262edbff8dSTomer Tayar 		DP_ERR(p_hwfn,
28272edbff8dSTomer Tayar 		       "Failed to get default amount for resource %d [%s]\n",
28282edbff8dSTomer Tayar 		       res_id, qed_hw_get_resc_name(res_id));
28299c8517c4STomer Tayar 		return rc;
28302edbff8dSTomer Tayar 	}
28312edbff8dSTomer Tayar 
28329c8517c4STomer Tayar 	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
28339c8517c4STomer Tayar 				   &mcp_resp, p_resc_num, p_resc_start);
28342edbff8dSTomer Tayar 	if (rc) {
28352edbff8dSTomer Tayar 		DP_NOTICE(p_hwfn,
28362edbff8dSTomer Tayar 			  "MFW response failure for an allocation request for resource %d [%s]\n",
28372edbff8dSTomer Tayar 			  res_id, qed_hw_get_resc_name(res_id));
28382edbff8dSTomer Tayar 		return rc;
28392edbff8dSTomer Tayar 	}
28402edbff8dSTomer Tayar 
28412edbff8dSTomer Tayar 	/* Default driver values are applied in the following cases:
28422edbff8dSTomer Tayar 	 * - The resource allocation MB command is not supported by the MFW
28432edbff8dSTomer Tayar 	 * - There is an internal error in the MFW while processing the request
28442edbff8dSTomer Tayar 	 * - The resource ID is unknown to the MFW
28452edbff8dSTomer Tayar 	 */
28469c8517c4STomer Tayar 	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
28479c8517c4STomer Tayar 		DP_INFO(p_hwfn,
28489c8517c4STomer Tayar 			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
28492edbff8dSTomer Tayar 			res_id,
28502edbff8dSTomer Tayar 			qed_hw_get_resc_name(res_id),
28512edbff8dSTomer Tayar 			mcp_resp, dflt_resc_num, dflt_resc_start);
28522edbff8dSTomer Tayar 		*p_resc_num = dflt_resc_num;
28532edbff8dSTomer Tayar 		*p_resc_start = dflt_resc_start;
28542edbff8dSTomer Tayar 		goto out;
28552edbff8dSTomer Tayar 	}
28562edbff8dSTomer Tayar 
28572edbff8dSTomer Tayar out:
28582edbff8dSTomer Tayar 	/* The number of PQs must be divisible by 8 [the HW granularity].
28592edbff8dSTomer Tayar 	 * Reduce the number so it fits.
28602edbff8dSTomer Tayar 	 */
28612edbff8dSTomer Tayar 	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
28622edbff8dSTomer Tayar 		DP_INFO(p_hwfn,
28632edbff8dSTomer Tayar 			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
28642edbff8dSTomer Tayar 			*p_resc_num,
28652edbff8dSTomer Tayar 			(*p_resc_num) & ~0x7,
28662edbff8dSTomer Tayar 			*p_resc_start, (*p_resc_start) & ~0x7);
28672edbff8dSTomer Tayar 		*p_resc_num &= ~0x7;
28682edbff8dSTomer Tayar 		*p_resc_start &= ~0x7;
28692edbff8dSTomer Tayar 	}
28702edbff8dSTomer Tayar 
28712edbff8dSTomer Tayar 	return 0;
28722edbff8dSTomer Tayar }
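/* A worked example of the alignment fix-up above, assuming hypothetical
 * MFW answers for the PQ resource:
 *
 *	*p_resc_num   = 69  ->  69 & ~0x7 = 64
 *	*p_resc_start = 70  ->  70 & ~0x7 = 64
 *
 * i.e. a misaligned allocation is rounded down to the 8-PQ granularity
 * rather than rejected.
 */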
28732edbff8dSTomer Tayar 
28749c8517c4STomer Tayar static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
2875fe56b9e6SYuval Mintz {
28769c8517c4STomer Tayar 	int rc;
28779c8517c4STomer Tayar 	u8 res_id;
28789c8517c4STomer Tayar 
28799c8517c4STomer Tayar 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
28809c8517c4STomer Tayar 		rc = __qed_hw_set_resc_info(p_hwfn, res_id);
28819c8517c4STomer Tayar 		if (rc)
28829c8517c4STomer Tayar 			return rc;
28839c8517c4STomer Tayar 	}
28849c8517c4STomer Tayar 
28859c8517c4STomer Tayar 	return 0;
28869c8517c4STomer Tayar }
28879c8517c4STomer Tayar 
28889c8517c4STomer Tayar static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
28899c8517c4STomer Tayar {
28909c8517c4STomer Tayar 	struct qed_resc_unlock_params resc_unlock_params;
28919c8517c4STomer Tayar 	struct qed_resc_lock_params resc_lock_params;
28929c79ddaaSMintz, Yuval 	bool b_ah = QED_IS_AH(p_hwfn->cdev);
28932edbff8dSTomer Tayar 	u8 res_id;
28942edbff8dSTomer Tayar 	int rc;
2895fe56b9e6SYuval Mintz 
28969c8517c4STomer Tayar 	/* Setting the max values of the soft resources and the subsequent
28979c8517c4STomer Tayar 	 * resource allocation queries should be atomic. Since several PFs can
28989c8517c4STomer Tayar 	 * run in parallel, a resource lock is needed.
28999c8517c4STomer Tayar 	 * If either the resource lock or the resource set value commands are
29009c8517c4STomer Tayar 	 * not supported, skip the max values setting, release the lock if
29019c8517c4STomer Tayar 	 * needed, and proceed to the queries. Other failures, including a
29029c8517c4STomer Tayar 	 * failure to acquire the lock, will cause this function to fail.
29039c8517c4STomer Tayar 	 */
2904f470f22cSsudarsana.kalluru@cavium.com 	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
2905f470f22cSsudarsana.kalluru@cavium.com 				       QED_RESC_LOCK_RESC_ALLOC, false);
29069c8517c4STomer Tayar 
29079c8517c4STomer Tayar 	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
29089c8517c4STomer Tayar 	if (rc && rc != -EINVAL) {
29092edbff8dSTomer Tayar 		return rc;
29109c8517c4STomer Tayar 	} else if (rc == -EINVAL) {
29119c8517c4STomer Tayar 		DP_INFO(p_hwfn,
29129c8517c4STomer Tayar 			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
29139c8517c4STomer Tayar 	} else if (!rc && !resc_lock_params.b_granted) {
29149c8517c4STomer Tayar 		DP_NOTICE(p_hwfn,
29159c8517c4STomer Tayar 			  "Failed to acquire the resource lock for the resource allocation commands\n");
29169c8517c4STomer Tayar 		return -EBUSY;
29179c8517c4STomer Tayar 	} else {
29189c8517c4STomer Tayar 		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
29199c8517c4STomer Tayar 		if (rc && rc != -EINVAL) {
29209c8517c4STomer Tayar 			DP_NOTICE(p_hwfn,
29219c8517c4STomer Tayar 				  "Failed to set the max values of the soft resources\n");
29229c8517c4STomer Tayar 			goto unlock_and_exit;
29239c8517c4STomer Tayar 		} else if (rc == -EINVAL) {
29249c8517c4STomer Tayar 			DP_INFO(p_hwfn,
29259c8517c4STomer Tayar 				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
29269c8517c4STomer Tayar 			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
29279c8517c4STomer Tayar 						 &resc_unlock_params);
29289c8517c4STomer Tayar 			if (rc)
29299c8517c4STomer Tayar 				DP_INFO(p_hwfn,
29309c8517c4STomer Tayar 					"Failed to release the resource lock for the resource allocation commands\n");
29319c8517c4STomer Tayar 		}
29329c8517c4STomer Tayar 	}
29339c8517c4STomer Tayar 
29349c8517c4STomer Tayar 	rc = qed_hw_set_resc_info(p_hwfn);
29359c8517c4STomer Tayar 	if (rc)
29369c8517c4STomer Tayar 		goto unlock_and_exit;
29379c8517c4STomer Tayar 
29389c8517c4STomer Tayar 	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
29399c8517c4STomer Tayar 		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
29409c8517c4STomer Tayar 		if (rc)
29419c8517c4STomer Tayar 			DP_INFO(p_hwfn,
29429c8517c4STomer Tayar 				"Failed to release the resource lock for the resource allocation commands\n");
29432edbff8dSTomer Tayar 	}
2944dbb799c3SYuval Mintz 
2945dbb799c3SYuval Mintz 	/* Sanity for ILT */
29469c79ddaaSMintz, Yuval 	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
29479c79ddaaSMintz, Yuval 	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
2948dbb799c3SYuval Mintz 		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
2949dbb799c3SYuval Mintz 			  RESC_START(p_hwfn, QED_ILT),
2950dbb799c3SYuval Mintz 			  RESC_END(p_hwfn, QED_ILT) - 1);
2951dbb799c3SYuval Mintz 		return -EINVAL;
2952dbb799c3SYuval Mintz 	}
2953fe56b9e6SYuval Mintz 
2954ebbdcc66SMintz, Yuval 	/* This will also learn the number of SBs from MFW */
2955ebbdcc66SMintz, Yuval 	if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
2956ebbdcc66SMintz, Yuval 		return -EINVAL;
2957ebbdcc66SMintz, Yuval 
295825c089d7SYuval Mintz 	qed_hw_set_feat(p_hwfn);
295925c089d7SYuval Mintz 
29602edbff8dSTomer Tayar 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
29612edbff8dSTomer Tayar 		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
29622edbff8dSTomer Tayar 			   qed_hw_get_resc_name(res_id),
29632edbff8dSTomer Tayar 			   RESC_NUM(p_hwfn, res_id),
29642edbff8dSTomer Tayar 			   RESC_START(p_hwfn, res_id));
2965dbb799c3SYuval Mintz 
2966dbb799c3SYuval Mintz 	return 0;
29679c8517c4STomer Tayar 
29689c8517c4STomer Tayar unlock_and_exit:
29699c8517c4STomer Tayar 	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
29709c8517c4STomer Tayar 		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
29719c8517c4STomer Tayar 	return rc;
2972fe56b9e6SYuval Mintz }
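/* The function above is essentially a lock / configure / query / unlock
 * exchange with the MFW, tolerating -EINVAL ("not supported") on the
 * optional steps. In outline:
 *
 *	qed_mcp_resc_lock()		- unsupported -> skip the set step
 *	qed_hw_set_soft_resc_size()	- max values, optional
 *	qed_hw_set_resc_info()		- per-resource allocation queries
 *	qed_mcp_resc_unlock()		- only if the lock was granted
 *
 * followed by the ILT range sanity check and the IGU CAM reset that
 * also learns the SB count.
 */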
2973fe56b9e6SYuval Mintz 
29741a635e48SYuval Mintz static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2975fe56b9e6SYuval Mintz {
2976fc48b7a6SYuval Mintz 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
29771e128c81SArun Easi 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
2978645874e5SSudarsana Reddy Kalluru 	struct qed_mcp_link_capabilities *p_caps;
2979cc875c2eSYuval Mintz 	struct qed_mcp_link_params *link;
2980fe56b9e6SYuval Mintz 
2981fe56b9e6SYuval Mintz 	/* Read global nvm_cfg address */
2982fe56b9e6SYuval Mintz 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2983fe56b9e6SYuval Mintz 
2984fe56b9e6SYuval Mintz 	/* Verify MCP has initialized it */
2985fe56b9e6SYuval Mintz 	if (!nvm_cfg_addr) {
2986fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
2987fe56b9e6SYuval Mintz 		return -EINVAL;
2988fe56b9e6SYuval Mintz 	}
2989fe56b9e6SYuval Mintz 
2990fe56b9e6SYuval Mintz 	/* Read nvm_cfg1 (notice this is just the offset, not the offsize (TBD)) */
2991fe56b9e6SYuval Mintz 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2992fe56b9e6SYuval Mintz 
2993cc875c2eSYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2994cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
2995cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1_glob, core_cfg);
2996cc875c2eSYuval Mintz 
2997cc875c2eSYuval Mintz 	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
2998cc875c2eSYuval Mintz 
2999cc875c2eSYuval Mintz 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
3000cc875c2eSYuval Mintz 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
3001351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
3002cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
3003cc875c2eSYuval Mintz 		break;
3004351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
3005cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
3006cc875c2eSYuval Mintz 		break;
3007351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
3008cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
3009cc875c2eSYuval Mintz 		break;
3010351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
3011cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
3012cc875c2eSYuval Mintz 		break;
3013351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
3014cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
3015cc875c2eSYuval Mintz 		break;
3016351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
3017cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
3018cc875c2eSYuval Mintz 		break;
3019351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
3020cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
3021cc875c2eSYuval Mintz 		break;
3022351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
3023cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
3024cc875c2eSYuval Mintz 		break;
30259c79ddaaSMintz, Yuval 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
30269c79ddaaSMintz, Yuval 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
30279c79ddaaSMintz, Yuval 		break;
3028351a4dedSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
3029cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
3030cc875c2eSYuval Mintz 		break;
30319c79ddaaSMintz, Yuval 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
30329c79ddaaSMintz, Yuval 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
30339c79ddaaSMintz, Yuval 		break;
3034cc875c2eSYuval Mintz 	default:
30351a635e48SYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
3036cc875c2eSYuval Mintz 		break;
3037cc875c2eSYuval Mintz 	}
3038cc875c2eSYuval Mintz 
3039cc875c2eSYuval Mintz 	/* Read default link configuration */
3040cc875c2eSYuval Mintz 	link = &p_hwfn->mcp_info->link_input;
3041645874e5SSudarsana Reddy Kalluru 	p_caps = &p_hwfn->mcp_info->link_capabilities;
3042cc875c2eSYuval Mintz 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3043cc875c2eSYuval Mintz 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
3044cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
3045cc875c2eSYuval Mintz 			   port_cfg_addr +
3046cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
304783aeb933SYuval Mintz 	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
304883aeb933SYuval Mintz 	link->speed.advertised_speeds = link_temp;
3049cc875c2eSYuval Mintz 
305083aeb933SYuval Mintz 	link_temp = link->speed.advertised_speeds;
305183aeb933SYuval Mintz 	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
3052cc875c2eSYuval Mintz 
3053cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
3054cc875c2eSYuval Mintz 			   port_cfg_addr +
3055cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, link_settings));
3056cc875c2eSYuval Mintz 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
3057cc875c2eSYuval Mintz 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
3058cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
3059cc875c2eSYuval Mintz 		link->speed.autoneg = true;
3060cc875c2eSYuval Mintz 		break;
3061cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
3062cc875c2eSYuval Mintz 		link->speed.forced_speed = 1000;
3063cc875c2eSYuval Mintz 		break;
3064cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
3065cc875c2eSYuval Mintz 		link->speed.forced_speed = 10000;
3066cc875c2eSYuval Mintz 		break;
30675bf0961cSSudarsana Reddy Kalluru 	case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
30685bf0961cSSudarsana Reddy Kalluru 		link->speed.forced_speed = 20000;
30695bf0961cSSudarsana Reddy Kalluru 		break;
3070cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
3071cc875c2eSYuval Mintz 		link->speed.forced_speed = 25000;
3072cc875c2eSYuval Mintz 		break;
3073cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
3074cc875c2eSYuval Mintz 		link->speed.forced_speed = 40000;
3075cc875c2eSYuval Mintz 		break;
3076cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
3077cc875c2eSYuval Mintz 		link->speed.forced_speed = 50000;
3078cc875c2eSYuval Mintz 		break;
3079351a4dedSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
3080cc875c2eSYuval Mintz 		link->speed.forced_speed = 100000;
3081cc875c2eSYuval Mintz 		break;
3082cc875c2eSYuval Mintz 	default:
30831a635e48SYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
3084cc875c2eSYuval Mintz 	}
3085cc875c2eSYuval Mintz 
308634f9199cSsudarsana.kalluru@cavium.com 	p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
308734f9199cSsudarsana.kalluru@cavium.com 		link->speed.autoneg;
308834f9199cSsudarsana.kalluru@cavium.com 
3089cc875c2eSYuval Mintz 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
3090cc875c2eSYuval Mintz 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
3091cc875c2eSYuval Mintz 	link->pause.autoneg = !!(link_temp &
3092cc875c2eSYuval Mintz 				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
3093cc875c2eSYuval Mintz 	link->pause.forced_rx = !!(link_temp &
3094cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
3095cc875c2eSYuval Mintz 	link->pause.forced_tx = !!(link_temp &
3096cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
3097cc875c2eSYuval Mintz 	link->loopback_mode = 0;
3098cc875c2eSYuval Mintz 
3099645874e5SSudarsana Reddy Kalluru 	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
3100645874e5SSudarsana Reddy Kalluru 		link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
3101645874e5SSudarsana Reddy Kalluru 				   offsetof(struct nvm_cfg1_port, ext_phy));
3102645874e5SSudarsana Reddy Kalluru 		link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
3103645874e5SSudarsana Reddy Kalluru 		link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
3104645874e5SSudarsana Reddy Kalluru 		p_caps->default_eee = QED_MCP_EEE_ENABLED;
3105645874e5SSudarsana Reddy Kalluru 		link->eee.enable = true;
3106645874e5SSudarsana Reddy Kalluru 		switch (link_temp) {
3107645874e5SSudarsana Reddy Kalluru 		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
3108645874e5SSudarsana Reddy Kalluru 			p_caps->default_eee = QED_MCP_EEE_DISABLED;
3109645874e5SSudarsana Reddy Kalluru 			link->eee.enable = false;
3110645874e5SSudarsana Reddy Kalluru 			break;
3111645874e5SSudarsana Reddy Kalluru 		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
3112645874e5SSudarsana Reddy Kalluru 			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
3113645874e5SSudarsana Reddy Kalluru 			break;
3114645874e5SSudarsana Reddy Kalluru 		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
3115645874e5SSudarsana Reddy Kalluru 			p_caps->eee_lpi_timer =
3116645874e5SSudarsana Reddy Kalluru 			    EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
3117645874e5SSudarsana Reddy Kalluru 			break;
3118645874e5SSudarsana Reddy Kalluru 		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
3119645874e5SSudarsana Reddy Kalluru 			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
3120645874e5SSudarsana Reddy Kalluru 			break;
3121645874e5SSudarsana Reddy Kalluru 		}
3122645874e5SSudarsana Reddy Kalluru 
3123645874e5SSudarsana Reddy Kalluru 		link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
3124645874e5SSudarsana Reddy Kalluru 		link->eee.tx_lpi_enable = link->eee.enable;
3125645874e5SSudarsana Reddy Kalluru 		link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
3126645874e5SSudarsana Reddy Kalluru 	} else {
3127645874e5SSudarsana Reddy Kalluru 		p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
3128645874e5SSudarsana Reddy Kalluru 	}
3129645874e5SSudarsana Reddy Kalluru 
3130645874e5SSudarsana Reddy Kalluru 	DP_VERBOSE(p_hwfn,
3131645874e5SSudarsana Reddy Kalluru 		   NETIF_MSG_LINK,
3132645874e5SSudarsana Reddy Kalluru 		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
3133645874e5SSudarsana Reddy Kalluru 		   link->speed.forced_speed,
3134645874e5SSudarsana Reddy Kalluru 		   link->speed.advertised_speeds,
3135645874e5SSudarsana Reddy Kalluru 		   link->speed.autoneg,
3136645874e5SSudarsana Reddy Kalluru 		   link->pause.autoneg,
3137645874e5SSudarsana Reddy Kalluru 		   p_caps->default_eee, p_caps->eee_lpi_timer);
3138cc875c2eSYuval Mintz 
3139b51bdfb9SSudarsana Reddy Kalluru 	if (IS_LEAD_HWFN(p_hwfn)) {
3140b51bdfb9SSudarsana Reddy Kalluru 		struct qed_dev *cdev = p_hwfn->cdev;
3141b51bdfb9SSudarsana Reddy Kalluru 
3142fe56b9e6SYuval Mintz 		/* Read Multi-function information from shmem */
3143fe56b9e6SYuval Mintz 		addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3144fe56b9e6SYuval Mintz 		       offsetof(struct nvm_cfg1, glob) +
3145fe56b9e6SYuval Mintz 		       offsetof(struct nvm_cfg1_glob, generic_cont0);
3146fe56b9e6SYuval Mintz 
3147fe56b9e6SYuval Mintz 		generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
3148fe56b9e6SYuval Mintz 
3149fe56b9e6SYuval Mintz 		mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
3150fe56b9e6SYuval Mintz 			  NVM_CFG1_GLOB_MF_MODE_OFFSET;
3151fe56b9e6SYuval Mintz 
3152fe56b9e6SYuval Mintz 		switch (mf_mode) {
3153fe56b9e6SYuval Mintz 		case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
3154b51bdfb9SSudarsana Reddy Kalluru 			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
3155b51bdfb9SSudarsana Reddy Kalluru 			break;
3156cac6f691SSudarsana Reddy Kalluru 		case NVM_CFG1_GLOB_MF_MODE_UFP:
3157cac6f691SSudarsana Reddy Kalluru 			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
3158cac6f691SSudarsana Reddy Kalluru 					BIT(QED_MF_LLH_PROTO_CLSS) |
3159cac6f691SSudarsana Reddy Kalluru 					BIT(QED_MF_UFP_SPECIFIC) |
3160cac6f691SSudarsana Reddy Kalluru 					BIT(QED_MF_8021Q_TAGGING);
3161cac6f691SSudarsana Reddy Kalluru 			break;
3162b51bdfb9SSudarsana Reddy Kalluru 		case NVM_CFG1_GLOB_MF_MODE_BD:
3163b51bdfb9SSudarsana Reddy Kalluru 			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
3164b51bdfb9SSudarsana Reddy Kalluru 					BIT(QED_MF_LLH_PROTO_CLSS) |
3165b51bdfb9SSudarsana Reddy Kalluru 					BIT(QED_MF_8021AD_TAGGING);
3166fe56b9e6SYuval Mintz 			break;
3167fe56b9e6SYuval Mintz 		case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
3168b51bdfb9SSudarsana Reddy Kalluru 			cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
31690bc5fe85SSudarsana Reddy Kalluru 					BIT(QED_MF_LLH_PROTO_CLSS) |
31700bc5fe85SSudarsana Reddy Kalluru 					BIT(QED_MF_LL2_NON_UNICAST) |
31710bc5fe85SSudarsana Reddy Kalluru 					BIT(QED_MF_INTER_PF_SWITCH);
3172fe56b9e6SYuval Mintz 			break;
3173fc48b7a6SYuval Mintz 		case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
3174b51bdfb9SSudarsana Reddy Kalluru 			cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
31750bc5fe85SSudarsana Reddy Kalluru 					BIT(QED_MF_LLH_PROTO_CLSS) |
31760bc5fe85SSudarsana Reddy Kalluru 					BIT(QED_MF_LL2_NON_UNICAST);
31770bc5fe85SSudarsana Reddy Kalluru 			if (QED_IS_BB(p_hwfn->cdev))
3178b51bdfb9SSudarsana Reddy Kalluru 				cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
3179fe56b9e6SYuval Mintz 			break;
3180fe56b9e6SYuval Mintz 		}
3184b51bdfb9SSudarsana Reddy Kalluru 	}
3185b51bdfb9SSudarsana Reddy Kalluru 
3186b51bdfb9SSudarsana Reddy Kalluru 	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
31870bc5fe85SSudarsana Reddy Kalluru 		p_hwfn->cdev->mf_bits);
3188fe56b9e6SYuval Mintz 
3189b51bdfb9SSudarsana Reddy Kalluru 	/* Read device capabilities information from shmem */
3190fc48b7a6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3191fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1, glob) +
3192fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1_glob, device_capabilities);
3193fc48b7a6SYuval Mintz 
3194fc48b7a6SYuval Mintz 	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
3195fc48b7a6SYuval Mintz 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
3196fc48b7a6SYuval Mintz 		__set_bit(QED_DEV_CAP_ETH,
3197fc48b7a6SYuval Mintz 			  &p_hwfn->hw_info.device_capabilities);
31981e128c81SArun Easi 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
31991e128c81SArun Easi 		__set_bit(QED_DEV_CAP_FCOE,
32001e128c81SArun Easi 			  &p_hwfn->hw_info.device_capabilities);
3201c5ac9319SYuval Mintz 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
3202c5ac9319SYuval Mintz 		__set_bit(QED_DEV_CAP_ISCSI,
3203c5ac9319SYuval Mintz 			  &p_hwfn->hw_info.device_capabilities);
3204c5ac9319SYuval Mintz 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
3205c5ac9319SYuval Mintz 		__set_bit(QED_DEV_CAP_ROCE,
3206c5ac9319SYuval Mintz 			  &p_hwfn->hw_info.device_capabilities);
3207fc48b7a6SYuval Mintz 
3208fe56b9e6SYuval Mintz 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
3209fe56b9e6SYuval Mintz }
3210fe56b9e6SYuval Mintz 
32111408cc1fSYuval Mintz static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
32121408cc1fSYuval Mintz {
3213dbb799c3SYuval Mintz 	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
3214dbb799c3SYuval Mintz 	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
32159c79ddaaSMintz, Yuval 	struct qed_dev *cdev = p_hwfn->cdev;
32161408cc1fSYuval Mintz 
32179c79ddaaSMintz, Yuval 	num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
32181408cc1fSYuval Mintz 
32191408cc1fSYuval Mintz 	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
32201408cc1fSYuval Mintz 	 * in the other bits are selected.
32211408cc1fSYuval Mintz 	 * Bits 1-15 are for functions 1-15, respectively, and their value is
32221408cc1fSYuval Mintz 	 * '0' only for enabled functions (function 0 always exists and is
32231408cc1fSYuval Mintz 	 * enabled).
32241408cc1fSYuval Mintz 	 * In case of CMT, only the "even" functions are enabled, and thus the
32251408cc1fSYuval Mintz 	 * number of functions for both hwfns is learnt from the same bits.
32261408cc1fSYuval Mintz 	 */
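	/* Illustration (hypothetical register value): on an AH device,
	 * reg_function_hide = 0xfffb means the bypass values are valid
	 * (bit 0), PF 2 is enabled (bit 2 clear) and PFs 1, 3-15 are hidden.
	 * With eng_mask = 0xfffe the loop below finds one additional enabled
	 * function, so num_funcs = 2 (PF 0 plus PF 2). Assuming
	 * rel_pf_id == abs_pf_id == 2, low_pfs_mask = 0x3 and exactly one
	 * lower PF (PF 1) is hidden, so enabled_func_idx drops from 2 to 1,
	 * i.e. PF 2 is the second enabled function on the engine.
	 */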
32271408cc1fSYuval Mintz 	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
32281408cc1fSYuval Mintz 
32291408cc1fSYuval Mintz 	if (reg_function_hide & 0x1) {
32309c79ddaaSMintz, Yuval 		if (QED_IS_BB(cdev)) {
32319c79ddaaSMintz, Yuval 			if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
32321408cc1fSYuval Mintz 				num_funcs = 0;
32331408cc1fSYuval Mintz 				eng_mask = 0xaaaa;
32341408cc1fSYuval Mintz 			} else {
32351408cc1fSYuval Mintz 				num_funcs = 1;
32361408cc1fSYuval Mintz 				eng_mask = 0x5554;
32371408cc1fSYuval Mintz 			}
32389c79ddaaSMintz, Yuval 		} else {
32399c79ddaaSMintz, Yuval 			num_funcs = 1;
32409c79ddaaSMintz, Yuval 			eng_mask = 0xfffe;
32419c79ddaaSMintz, Yuval 		}
32421408cc1fSYuval Mintz 
32431408cc1fSYuval Mintz 		/* Get the number of enabled functions on the engine */
32441408cc1fSYuval Mintz 		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
32451408cc1fSYuval Mintz 		while (tmp) {
32461408cc1fSYuval Mintz 			if (tmp & 0x1)
32471408cc1fSYuval Mintz 				num_funcs++;
32481408cc1fSYuval Mintz 			tmp >>= 0x1;
32491408cc1fSYuval Mintz 		}
3250dbb799c3SYuval Mintz 
3251dbb799c3SYuval Mintz 		/* Get the PF index within the enabled functions */
3252dbb799c3SYuval Mintz 		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
3253dbb799c3SYuval Mintz 		tmp = reg_function_hide & eng_mask & low_pfs_mask;
3254dbb799c3SYuval Mintz 		while (tmp) {
3255dbb799c3SYuval Mintz 			if (tmp & 0x1)
3256dbb799c3SYuval Mintz 				enabled_func_idx--;
3257dbb799c3SYuval Mintz 			tmp >>= 0x1;
3258dbb799c3SYuval Mintz 		}
32591408cc1fSYuval Mintz 	}
32601408cc1fSYuval Mintz 
32611408cc1fSYuval Mintz 	p_hwfn->num_funcs_on_engine = num_funcs;
3262dbb799c3SYuval Mintz 	p_hwfn->enabled_func_idx = enabled_func_idx;
32631408cc1fSYuval Mintz 
32641408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
32651408cc1fSYuval Mintz 		   NETIF_MSG_PROBE,
3266525ef5c0SYuval Mintz 		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
32671408cc1fSYuval Mintz 		   p_hwfn->rel_pf_id,
32681408cc1fSYuval Mintz 		   p_hwfn->abs_pf_id,
3269525ef5c0SYuval Mintz 		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
32701408cc1fSYuval Mintz }
32711408cc1fSYuval Mintz 
32729c79ddaaSMintz, Yuval static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
32739c79ddaaSMintz, Yuval {
32740ebcebbeSSudarsana Reddy Kalluru 	u32 addr, global_offsize, global_addr, port_mode;
32750ebcebbeSSudarsana Reddy Kalluru 	struct qed_dev *cdev = p_hwfn->cdev;
32760ebcebbeSSudarsana Reddy Kalluru 
32770ebcebbeSSudarsana Reddy Kalluru 	/* In CMT there is always only one port */
32780ebcebbeSSudarsana Reddy Kalluru 	if (cdev->num_hwfns > 1) {
32790ebcebbeSSudarsana Reddy Kalluru 		cdev->num_ports_in_engine = 1;
32800ebcebbeSSudarsana Reddy Kalluru 		cdev->num_ports = 1;
32810ebcebbeSSudarsana Reddy Kalluru 		return;
32820ebcebbeSSudarsana Reddy Kalluru 	}
32830ebcebbeSSudarsana Reddy Kalluru 
32840ebcebbeSSudarsana Reddy Kalluru 	/* Determine the number of ports per engine */
32850ebcebbeSSudarsana Reddy Kalluru 	port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE);
32860ebcebbeSSudarsana Reddy Kalluru 	switch (port_mode) {
32870ebcebbeSSudarsana Reddy Kalluru 	case 0x0:
32880ebcebbeSSudarsana Reddy Kalluru 		cdev->num_ports_in_engine = 1;
32890ebcebbeSSudarsana Reddy Kalluru 		break;
32900ebcebbeSSudarsana Reddy Kalluru 	case 0x1:
32910ebcebbeSSudarsana Reddy Kalluru 		cdev->num_ports_in_engine = 2;
32920ebcebbeSSudarsana Reddy Kalluru 		break;
32930ebcebbeSSudarsana Reddy Kalluru 	case 0x2:
32940ebcebbeSSudarsana Reddy Kalluru 		cdev->num_ports_in_engine = 4;
32950ebcebbeSSudarsana Reddy Kalluru 		break;
32960ebcebbeSSudarsana Reddy Kalluru 	default:
32970ebcebbeSSudarsana Reddy Kalluru 		DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode);
32980ebcebbeSSudarsana Reddy Kalluru 		cdev->num_ports_in_engine = 1;	/* Default to something */
32990ebcebbeSSudarsana Reddy Kalluru 		break;
33000ebcebbeSSudarsana Reddy Kalluru 	}
33010ebcebbeSSudarsana Reddy Kalluru 
33020ebcebbeSSudarsana Reddy Kalluru 	/* Get the total number of ports of the device */
33030ebcebbeSSudarsana Reddy Kalluru 	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
33040ebcebbeSSudarsana Reddy Kalluru 				    PUBLIC_GLOBAL);
33050ebcebbeSSudarsana Reddy Kalluru 	global_offsize = qed_rd(p_hwfn, p_ptt, addr);
33060ebcebbeSSudarsana Reddy Kalluru 	global_addr = SECTION_ADDR(global_offsize, 0);
33070ebcebbeSSudarsana Reddy Kalluru 	addr = global_addr + offsetof(struct public_global, max_ports);
33080ebcebbeSSudarsana Reddy Kalluru 	cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr);
33099c79ddaaSMintz, Yuval }
33109c79ddaaSMintz, Yuval 
3311645874e5SSudarsana Reddy Kalluru static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3312645874e5SSudarsana Reddy Kalluru {
3313645874e5SSudarsana Reddy Kalluru 	struct qed_mcp_link_capabilities *p_caps;
3314645874e5SSudarsana Reddy Kalluru 	u32 eee_status;
3315645874e5SSudarsana Reddy Kalluru 
3316645874e5SSudarsana Reddy Kalluru 	p_caps = &p_hwfn->mcp_info->link_capabilities;
3317645874e5SSudarsana Reddy Kalluru 	if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED)
3318645874e5SSudarsana Reddy Kalluru 		return;
3319645874e5SSudarsana Reddy Kalluru 
3320645874e5SSudarsana Reddy Kalluru 	p_caps->eee_speed_caps = 0;
3321645874e5SSudarsana Reddy Kalluru 	eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
3322645874e5SSudarsana Reddy Kalluru 			    offsetof(struct public_port, eee_status));
3323645874e5SSudarsana Reddy Kalluru 	eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
3324645874e5SSudarsana Reddy Kalluru 			EEE_SUPPORTED_SPEED_OFFSET;
3325645874e5SSudarsana Reddy Kalluru 
3326645874e5SSudarsana Reddy Kalluru 	if (eee_status & EEE_1G_SUPPORTED)
3327645874e5SSudarsana Reddy Kalluru 		p_caps->eee_speed_caps |= QED_EEE_1G_ADV;
3328645874e5SSudarsana Reddy Kalluru 	if (eee_status & EEE_10G_SUPPORTED)
3329645874e5SSudarsana Reddy Kalluru 		p_caps->eee_speed_caps |= QED_EEE_10G_ADV;
3330645874e5SSudarsana Reddy Kalluru }
3331645874e5SSudarsana Reddy Kalluru 
33329c79ddaaSMintz, Yuval static int
33339c79ddaaSMintz, Yuval qed_get_hw_info(struct qed_hwfn *p_hwfn,
33349c79ddaaSMintz, Yuval 		struct qed_ptt *p_ptt,
33359c79ddaaSMintz, Yuval 		enum qed_pci_personality personality)
33369c79ddaaSMintz, Yuval {
33379c79ddaaSMintz, Yuval 	int rc;
33389c79ddaaSMintz, Yuval 
33399c79ddaaSMintz, Yuval 	/* Since all information is common, only the first hwfn should do this */
33409c79ddaaSMintz, Yuval 	if (IS_LEAD_HWFN(p_hwfn)) {
33419c79ddaaSMintz, Yuval 		rc = qed_iov_hw_info(p_hwfn);
33429c79ddaaSMintz, Yuval 		if (rc)
33439c79ddaaSMintz, Yuval 			return rc;
33449c79ddaaSMintz, Yuval 	}
33459c79ddaaSMintz, Yuval 
33460ebcebbeSSudarsana Reddy Kalluru 	if (IS_LEAD_HWFN(p_hwfn))
33479c79ddaaSMintz, Yuval 		qed_hw_info_port_num(p_hwfn, p_ptt);
3348fe56b9e6SYuval Mintz 
3349645874e5SSudarsana Reddy Kalluru 	qed_mcp_get_capabilities(p_hwfn, p_ptt);
3350645874e5SSudarsana Reddy Kalluru 
3351fe56b9e6SYuval Mintz 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
3352fe56b9e6SYuval Mintz 
3353fe56b9e6SYuval Mintz 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
3354fe56b9e6SYuval Mintz 	if (rc)
3355fe56b9e6SYuval Mintz 		return rc;
3356fe56b9e6SYuval Mintz 
3357fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn))
3358fe56b9e6SYuval Mintz 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
3359fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.mac);
3360fe56b9e6SYuval Mintz 	else
3361fe56b9e6SYuval Mintz 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
3362fe56b9e6SYuval Mintz 
3363fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
3364fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
3365fe56b9e6SYuval Mintz 			p_hwfn->hw_info.ovlan =
3366fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.ovlan;
3367fe56b9e6SYuval Mintz 
3368fe56b9e6SYuval Mintz 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
3369645874e5SSudarsana Reddy Kalluru 
3370645874e5SSudarsana Reddy Kalluru 		qed_get_eee_caps(p_hwfn, p_ptt);
3371cac6f691SSudarsana Reddy Kalluru 
3372cac6f691SSudarsana Reddy Kalluru 		qed_mcp_read_ufp_config(p_hwfn, p_ptt);
3373fe56b9e6SYuval Mintz 	}
3374fe56b9e6SYuval Mintz 
3375fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
3376fe56b9e6SYuval Mintz 		enum qed_pci_personality protocol;
3377fe56b9e6SYuval Mintz 
3378fe56b9e6SYuval Mintz 		protocol = p_hwfn->mcp_info->func_info.protocol;
3379fe56b9e6SYuval Mintz 		p_hwfn->hw_info.personality = protocol;
3380fe56b9e6SYuval Mintz 	}
3381fe56b9e6SYuval Mintz 
338261be82b0SDenis Bolotin 	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
338361be82b0SDenis Bolotin 		p_hwfn->hw_info.multi_tc_roce_en = 1;
338461be82b0SDenis Bolotin 
3385b5a9ee7cSAriel Elior 	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
3386b5a9ee7cSAriel Elior 	p_hwfn->hw_info.num_active_tc = 1;
3387b5a9ee7cSAriel Elior 
33881408cc1fSYuval Mintz 	qed_get_num_funcs(p_hwfn, p_ptt);
33891408cc1fSYuval Mintz 
33900fefbfbaSSudarsana Kalluru 	if (qed_mcp_is_init(p_hwfn))
33910fefbfbaSSudarsana Kalluru 		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
33920fefbfbaSSudarsana Kalluru 
33939c8517c4STomer Tayar 	return qed_hw_get_resc(p_hwfn, p_ptt);
3394fe56b9e6SYuval Mintz }
3395fe56b9e6SYuval Mintz 
339615582962SRahul Verma static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3397fe56b9e6SYuval Mintz {
339815582962SRahul Verma 	struct qed_dev *cdev = p_hwfn->cdev;
33999c79ddaaSMintz, Yuval 	u16 device_id_mask;
3400fe56b9e6SYuval Mintz 	u32 tmp;
3401fe56b9e6SYuval Mintz 
3402fc48b7a6SYuval Mintz 	/* Read Vendor Id / Device Id */
34031a635e48SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
34041a635e48SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
34051a635e48SYuval Mintz 
34069c79ddaaSMintz, Yuval 	/* Determine type */
34079c79ddaaSMintz, Yuval 	device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
34089c79ddaaSMintz, Yuval 	switch (device_id_mask) {
34099c79ddaaSMintz, Yuval 	case QED_DEV_ID_MASK_BB:
34109c79ddaaSMintz, Yuval 		cdev->type = QED_DEV_TYPE_BB;
34119c79ddaaSMintz, Yuval 		break;
34129c79ddaaSMintz, Yuval 	case QED_DEV_ID_MASK_AH:
34139c79ddaaSMintz, Yuval 		cdev->type = QED_DEV_TYPE_AH;
34149c79ddaaSMintz, Yuval 		break;
34159c79ddaaSMintz, Yuval 	default:
34169c79ddaaSMintz, Yuval 		DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
34179c79ddaaSMintz, Yuval 		return -EBUSY;
34189c79ddaaSMintz, Yuval 	}
34199c79ddaaSMintz, Yuval 
342015582962SRahul Verma 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
342115582962SRahul Verma 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
342215582962SRahul Verma 
3423fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
3424fe56b9e6SYuval Mintz 
3425fe56b9e6SYuval Mintz 	/* Learn number of HW-functions */
342615582962SRahul Verma 	tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
3427fe56b9e6SYuval Mintz 
3428fc48b7a6SYuval Mintz 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
3429fe56b9e6SYuval Mintz 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
3430fe56b9e6SYuval Mintz 		cdev->num_hwfns = 2;
3431fe56b9e6SYuval Mintz 	} else {
3432fe56b9e6SYuval Mintz 		cdev->num_hwfns = 1;
3433fe56b9e6SYuval Mintz 	}
3434fe56b9e6SYuval Mintz 
343515582962SRahul Verma 	cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
3436fe56b9e6SYuval Mintz 				    MISCS_REG_CHIP_TEST_REG) >> 4;
3437fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
343815582962SRahul Verma 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
3439fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
3440fe56b9e6SYuval Mintz 
3441fe56b9e6SYuval Mintz 	DP_INFO(cdev->hwfns,
34429c79ddaaSMintz, Yuval 		"Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
34439c79ddaaSMintz, Yuval 		QED_IS_BB(cdev) ? "BB" : "AH",
34449c79ddaaSMintz, Yuval 		'A' + cdev->chip_rev,
34459c79ddaaSMintz, Yuval 		(int)cdev->chip_metal,
3446fe56b9e6SYuval Mintz 		cdev->chip_num, cdev->chip_rev,
3447fe56b9e6SYuval Mintz 		cdev->chip_bond_id, cdev->chip_metal);
344812e09c69SYuval Mintz 
344912e09c69SYuval Mintz 	return 0;
3450fe56b9e6SYuval Mintz }
3451fe56b9e6SYuval Mintz 
345243645ce0SSudarsana Reddy Kalluru static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
345343645ce0SSudarsana Reddy Kalluru {
345443645ce0SSudarsana Reddy Kalluru 	kfree(p_hwfn->nvm_info.image_att);
345543645ce0SSudarsana Reddy Kalluru 	p_hwfn->nvm_info.image_att = NULL;
345643645ce0SSudarsana Reddy Kalluru }
345743645ce0SSudarsana Reddy Kalluru 
3458fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
3459fe56b9e6SYuval Mintz 				 void __iomem *p_regview,
3460fe56b9e6SYuval Mintz 				 void __iomem *p_doorbells,
3461fe56b9e6SYuval Mintz 				 enum qed_pci_personality personality)
3462fe56b9e6SYuval Mintz {
346364515dc8STomer Tayar 	struct qed_dev *cdev = p_hwfn->cdev;
3464fe56b9e6SYuval Mintz 	int rc = 0;
3465fe56b9e6SYuval Mintz 
3466fe56b9e6SYuval Mintz 	/* Split PCI bars evenly between hwfns */
3467fe56b9e6SYuval Mintz 	p_hwfn->regview = p_regview;
3468fe56b9e6SYuval Mintz 	p_hwfn->doorbells = p_doorbells;
3469fe56b9e6SYuval Mintz 
34701408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
34711408cc1fSYuval Mintz 		return qed_vf_hw_prepare(p_hwfn);
34721408cc1fSYuval Mintz 
3473fe56b9e6SYuval Mintz 	/* Validate that chip access is feasible */
3474fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
3475fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
3476fe56b9e6SYuval Mintz 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
3477fe56b9e6SYuval Mintz 		return -EINVAL;
3478fe56b9e6SYuval Mintz 	}
3479fe56b9e6SYuval Mintz 
3480fe56b9e6SYuval Mintz 	get_function_id(p_hwfn);
3481fe56b9e6SYuval Mintz 
348212e09c69SYuval Mintz 	/* Allocate PTT pool */
348312e09c69SYuval Mintz 	rc = qed_ptt_pool_alloc(p_hwfn);
34842591c280SJoe Perches 	if (rc)
3485fe56b9e6SYuval Mintz 		goto err0;
3486fe56b9e6SYuval Mintz 
348712e09c69SYuval Mintz 	/* Allocate the main PTT */
348812e09c69SYuval Mintz 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
348912e09c69SYuval Mintz 
3490fe56b9e6SYuval Mintz 	/* First hwfn learns basic information, e.g., number of hwfns */
349112e09c69SYuval Mintz 	if (!p_hwfn->my_id) {
349215582962SRahul Verma 		rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
34931a635e48SYuval Mintz 		if (rc)
349412e09c69SYuval Mintz 			goto err1;
349512e09c69SYuval Mintz 	}
349612e09c69SYuval Mintz 
349712e09c69SYuval Mintz 	qed_hw_hwfn_prepare(p_hwfn);
3498fe56b9e6SYuval Mintz 
3499fe56b9e6SYuval Mintz 	/* Initialize MCP structure */
3500fe56b9e6SYuval Mintz 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
3501fe56b9e6SYuval Mintz 	if (rc) {
3502fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
3503fe56b9e6SYuval Mintz 		goto err1;
3504fe56b9e6SYuval Mintz 	}
3505fe56b9e6SYuval Mintz 
3506fe56b9e6SYuval Mintz 	/* Read the device configuration information from the HW and SHMEM */
3507fe56b9e6SYuval Mintz 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
3508fe56b9e6SYuval Mintz 	if (rc) {
3509fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
3510fe56b9e6SYuval Mintz 		goto err2;
3511fe56b9e6SYuval Mintz 	}
3512fe56b9e6SYuval Mintz 
351318a69e36SMintz, Yuval 	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
351418a69e36SMintz, Yuval 	 * is called, since it sets the number of ports in the engine.
351518a69e36SMintz, Yuval 	 */
351664515dc8STomer Tayar 	if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
351718a69e36SMintz, Yuval 		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
351818a69e36SMintz, Yuval 		if (rc)
351918a69e36SMintz, Yuval 			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
352018a69e36SMintz, Yuval 	}
352118a69e36SMintz, Yuval 
352243645ce0SSudarsana Reddy Kalluru 	/* NVRAM info initialization and population */
352343645ce0SSudarsana Reddy Kalluru 	if (IS_LEAD_HWFN(p_hwfn)) {
352443645ce0SSudarsana Reddy Kalluru 		rc = qed_mcp_nvm_info_populate(p_hwfn);
352543645ce0SSudarsana Reddy Kalluru 		if (rc) {
352643645ce0SSudarsana Reddy Kalluru 			DP_NOTICE(p_hwfn,
352743645ce0SSudarsana Reddy Kalluru 				  "Failed to populate nvm info shadow\n");
352843645ce0SSudarsana Reddy Kalluru 			goto err2;
352943645ce0SSudarsana Reddy Kalluru 		}
353043645ce0SSudarsana Reddy Kalluru 	}
353143645ce0SSudarsana Reddy Kalluru 
3532fe56b9e6SYuval Mintz 	/* Allocate the init RT array and initialize the init-ops engine */
3533fe56b9e6SYuval Mintz 	rc = qed_init_alloc(p_hwfn);
35342591c280SJoe Perches 	if (rc)
353543645ce0SSudarsana Reddy Kalluru 		goto err3;
3536fe56b9e6SYuval Mintz 
3537fe56b9e6SYuval Mintz 	return rc;
353843645ce0SSudarsana Reddy Kalluru err3:
353943645ce0SSudarsana Reddy Kalluru 	if (IS_LEAD_HWFN(p_hwfn))
354043645ce0SSudarsana Reddy Kalluru 		qed_nvm_info_free(p_hwfn);
3541fe56b9e6SYuval Mintz err2:
354232a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn))
354332a47e72SYuval Mintz 		qed_iov_free_hw_info(p_hwfn->cdev);
3544fe56b9e6SYuval Mintz 	qed_mcp_free(p_hwfn);
3545fe56b9e6SYuval Mintz err1:
3546fe56b9e6SYuval Mintz 	qed_hw_hwfn_free(p_hwfn);
3547fe56b9e6SYuval Mintz err0:
3548fe56b9e6SYuval Mintz 	return rc;
3549fe56b9e6SYuval Mintz }
3550fe56b9e6SYuval Mintz 
3551fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev,
3552fe56b9e6SYuval Mintz 		   int personality)
3553fe56b9e6SYuval Mintz {
3554c78df14eSAriel Elior 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3555c78df14eSAriel Elior 	int rc;
3556fe56b9e6SYuval Mintz 
3557fe56b9e6SYuval Mintz 	/* Store the precompiled init data ptrs */
35581408cc1fSYuval Mintz 	if (IS_PF(cdev))
3559fe56b9e6SYuval Mintz 		qed_init_iro_array(cdev);
3560fe56b9e6SYuval Mintz 
3561fe56b9e6SYuval Mintz 	/* Initialize the first hwfn - will learn number of hwfns */
3562c78df14eSAriel Elior 	rc = qed_hw_prepare_single(p_hwfn,
3563c78df14eSAriel Elior 				   cdev->regview,
3564fe56b9e6SYuval Mintz 				   cdev->doorbells, personality);
3565fe56b9e6SYuval Mintz 	if (rc)
3566fe56b9e6SYuval Mintz 		return rc;
3567fe56b9e6SYuval Mintz 
3568c78df14eSAriel Elior 	personality = p_hwfn->hw_info.personality;
3569fe56b9e6SYuval Mintz 
3570fe56b9e6SYuval Mintz 	/* Initialize the rest of the hwfns */
3571c78df14eSAriel Elior 	if (cdev->num_hwfns > 1) {
3572fe56b9e6SYuval Mintz 		void __iomem *p_regview, *p_doorbell;
3573c78df14eSAriel Elior 		u8 __iomem *addr;
3574fe56b9e6SYuval Mintz 
3575c78df14eSAriel Elior 		/* adjust bar offset for second engine */
357615582962SRahul Verma 		addr = cdev->regview +
357715582962SRahul Verma 		       qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
357815582962SRahul Verma 				       BAR_ID_0) / 2;
3579c78df14eSAriel Elior 		p_regview = addr;
3580c78df14eSAriel Elior 
358115582962SRahul Verma 		addr = cdev->doorbells +
358215582962SRahul Verma 		       qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
358315582962SRahul Verma 				       BAR_ID_1) / 2;
3584c78df14eSAriel Elior 		p_doorbell = addr;
3585c78df14eSAriel Elior 
3586c78df14eSAriel Elior 		/* prepare second hw function */
3587c78df14eSAriel Elior 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
3588fe56b9e6SYuval Mintz 					   p_doorbell, personality);
3589c78df14eSAriel Elior 
3590c78df14eSAriel Elior 		/* in case of error, need to free the previously
3591c78df14eSAriel Elior 		 * initialized hwfn 0.
3592c78df14eSAriel Elior 		 */
3593fe56b9e6SYuval Mintz 		if (rc) {
35941408cc1fSYuval Mintz 			if (IS_PF(cdev)) {
3595c78df14eSAriel Elior 				qed_init_free(p_hwfn);
359643645ce0SSudarsana Reddy Kalluru 				qed_nvm_info_free(p_hwfn);
3597c78df14eSAriel Elior 				qed_mcp_free(p_hwfn);
3598c78df14eSAriel Elior 				qed_hw_hwfn_free(p_hwfn);
3599fe56b9e6SYuval Mintz 			}
3600fe56b9e6SYuval Mintz 		}
36011408cc1fSYuval Mintz 	}
3602fe56b9e6SYuval Mintz 
3603c78df14eSAriel Elior 	return rc;
3604fe56b9e6SYuval Mintz }
3605fe56b9e6SYuval Mintz 
3606fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
3607fe56b9e6SYuval Mintz {
36080fefbfbaSSudarsana Kalluru 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3609fe56b9e6SYuval Mintz 	int i;
3610fe56b9e6SYuval Mintz 
36110fefbfbaSSudarsana Kalluru 	if (IS_PF(cdev))
36120fefbfbaSSudarsana Kalluru 		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
36130fefbfbaSSudarsana Kalluru 					       QED_OV_DRIVER_STATE_NOT_LOADED);
36140fefbfbaSSudarsana Kalluru 
3615fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
3616fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3617fe56b9e6SYuval Mintz 
36181408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
36190b55e27dSYuval Mintz 			qed_vf_pf_release(p_hwfn);
36201408cc1fSYuval Mintz 			continue;
36211408cc1fSYuval Mintz 		}
36221408cc1fSYuval Mintz 
3623fe56b9e6SYuval Mintz 		qed_init_free(p_hwfn);
3624fe56b9e6SYuval Mintz 		qed_hw_hwfn_free(p_hwfn);
3625fe56b9e6SYuval Mintz 		qed_mcp_free(p_hwfn);
3626fe56b9e6SYuval Mintz 	}
362732a47e72SYuval Mintz 
362832a47e72SYuval Mintz 	qed_iov_free_hw_info(cdev);
362943645ce0SSudarsana Reddy Kalluru 
363043645ce0SSudarsana Reddy Kalluru 	qed_nvm_info_free(p_hwfn);
3631fe56b9e6SYuval Mintz }
3632fe56b9e6SYuval Mintz 
3633a91eb52aSYuval Mintz static void qed_chain_free_next_ptr(struct qed_dev *cdev,
3634a91eb52aSYuval Mintz 				    struct qed_chain *p_chain)
3635a91eb52aSYuval Mintz {
3636a91eb52aSYuval Mintz 	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
3637a91eb52aSYuval Mintz 	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
3638a91eb52aSYuval Mintz 	struct qed_chain_next *p_next;
3639a91eb52aSYuval Mintz 	u32 size, i;
3640a91eb52aSYuval Mintz 
3641a91eb52aSYuval Mintz 	if (!p_virt)
3642a91eb52aSYuval Mintz 		return;
3643a91eb52aSYuval Mintz 
3644a91eb52aSYuval Mintz 	size = p_chain->elem_size * p_chain->usable_per_page;
3645a91eb52aSYuval Mintz 
3646a91eb52aSYuval Mintz 	for (i = 0; i < p_chain->page_cnt; i++) {
3647a91eb52aSYuval Mintz 		if (!p_virt)
3648a91eb52aSYuval Mintz 			break;
3649a91eb52aSYuval Mintz 
3650a91eb52aSYuval Mintz 		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
3651a91eb52aSYuval Mintz 		p_virt_next = p_next->next_virt;
3652a91eb52aSYuval Mintz 		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
3653a91eb52aSYuval Mintz 
3654a91eb52aSYuval Mintz 		dma_free_coherent(&cdev->pdev->dev,
3655a91eb52aSYuval Mintz 				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
3656a91eb52aSYuval Mintz 
3657a91eb52aSYuval Mintz 		p_virt = p_virt_next;
3658a91eb52aSYuval Mintz 		p_phys = p_phys_next;
3659a91eb52aSYuval Mintz 	}
3660a91eb52aSYuval Mintz }
3661a91eb52aSYuval Mintz 
3662a91eb52aSYuval Mintz static void qed_chain_free_single(struct qed_dev *cdev,
3663a91eb52aSYuval Mintz 				  struct qed_chain *p_chain)
3664a91eb52aSYuval Mintz {
3665a91eb52aSYuval Mintz 	if (!p_chain->p_virt_addr)
3666a91eb52aSYuval Mintz 		return;
3667a91eb52aSYuval Mintz 
3668a91eb52aSYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
3669a91eb52aSYuval Mintz 			  QED_CHAIN_PAGE_SIZE,
3670a91eb52aSYuval Mintz 			  p_chain->p_virt_addr, p_chain->p_phys_addr);
3671a91eb52aSYuval Mintz }
3672a91eb52aSYuval Mintz 
3673a91eb52aSYuval Mintz static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
3674a91eb52aSYuval Mintz {
3675a91eb52aSYuval Mintz 	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
3676a91eb52aSYuval Mintz 	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
36776d937acfSMintz, Yuval 	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
3678a91eb52aSYuval Mintz 
3679a91eb52aSYuval Mintz 	if (!pp_virt_addr_tbl)
3680a91eb52aSYuval Mintz 		return;
3681a91eb52aSYuval Mintz 
36826d937acfSMintz, Yuval 	if (!p_pbl_virt)
3683a91eb52aSYuval Mintz 		goto out;
3684a91eb52aSYuval Mintz 
3685a91eb52aSYuval Mintz 	for (i = 0; i < page_cnt; i++) {
3686a91eb52aSYuval Mintz 		if (!pp_virt_addr_tbl[i])
3687a91eb52aSYuval Mintz 			break;
3688a91eb52aSYuval Mintz 
3689a91eb52aSYuval Mintz 		dma_free_coherent(&cdev->pdev->dev,
3690a91eb52aSYuval Mintz 				  QED_CHAIN_PAGE_SIZE,
3691a91eb52aSYuval Mintz 				  pp_virt_addr_tbl[i],
3692a91eb52aSYuval Mintz 				  *(dma_addr_t *)p_pbl_virt);
3693a91eb52aSYuval Mintz 
3694a91eb52aSYuval Mintz 		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
3695a91eb52aSYuval Mintz 	}
3696a91eb52aSYuval Mintz 
3697a91eb52aSYuval Mintz 	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
36981a4a6975SMintz, Yuval 
36991a4a6975SMintz, Yuval 	if (!p_chain->b_external_pbl)
3700a91eb52aSYuval Mintz 		dma_free_coherent(&cdev->pdev->dev,
3701a91eb52aSYuval Mintz 				  pbl_size,
37026d937acfSMintz, Yuval 				  p_chain->pbl_sp.p_virt_table,
37036d937acfSMintz, Yuval 				  p_chain->pbl_sp.p_phys_table);
3704a91eb52aSYuval Mintz out:
3705a91eb52aSYuval Mintz 	vfree(p_chain->pbl.pp_virt_addr_tbl);
37061a4a6975SMintz, Yuval 	p_chain->pbl.pp_virt_addr_tbl = NULL;
3707a91eb52aSYuval Mintz }
3708a91eb52aSYuval Mintz 
3709a91eb52aSYuval Mintz void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
3710a91eb52aSYuval Mintz {
3711a91eb52aSYuval Mintz 	switch (p_chain->mode) {
3712a91eb52aSYuval Mintz 	case QED_CHAIN_MODE_NEXT_PTR:
3713a91eb52aSYuval Mintz 		qed_chain_free_next_ptr(cdev, p_chain);
3714a91eb52aSYuval Mintz 		break;
3715a91eb52aSYuval Mintz 	case QED_CHAIN_MODE_SINGLE:
3716a91eb52aSYuval Mintz 		qed_chain_free_single(cdev, p_chain);
3717a91eb52aSYuval Mintz 		break;
3718a91eb52aSYuval Mintz 	case QED_CHAIN_MODE_PBL:
3719a91eb52aSYuval Mintz 		qed_chain_free_pbl(cdev, p_chain);
3720a91eb52aSYuval Mintz 		break;
3721a91eb52aSYuval Mintz 	}
3722a91eb52aSYuval Mintz }
3723a91eb52aSYuval Mintz 
3724a91eb52aSYuval Mintz static int
3725a91eb52aSYuval Mintz qed_chain_alloc_sanity_check(struct qed_dev *cdev,
3726a91eb52aSYuval Mintz 			     enum qed_chain_cnt_type cnt_type,
3727a91eb52aSYuval Mintz 			     size_t elem_size, u32 page_cnt)
3728a91eb52aSYuval Mintz {
3729a91eb52aSYuval Mintz 	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
3730a91eb52aSYuval Mintz 
3731a91eb52aSYuval Mintz 	/* The actual chain size can be larger than the maximal possible value
3732a91eb52aSYuval Mintz 	 * after rounding up the requested elements number to pages, and after
3733a91eb52aSYuval Mintz 	 * taking into account the unusable elements (next-ptr elements).
3734a91eb52aSYuval Mintz 	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
3735a91eb52aSYuval Mintz 	 * size/capacity fields are of a u32 type.
3736a91eb52aSYuval Mintz 	 */
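	/* Illustration (assuming the 4KB QED_CHAIN_PAGE_SIZE and a 64B
	 * element): each page holds 64 elements, so a u16 chain may span at
	 * most 1024 pages (65536 elements in total); a request that rounds
	 * up to 1025 pages would be rejected here.
	 */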
3737a91eb52aSYuval Mintz 	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
37383ef310a7STomer Tayar 	     chain_size > ((u32)U16_MAX + 1)) ||
37393ef310a7STomer Tayar 	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
3740a91eb52aSYuval Mintz 		DP_NOTICE(cdev,
3741a91eb52aSYuval Mintz 			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
3742a91eb52aSYuval Mintz 			  chain_size);
3743a91eb52aSYuval Mintz 		return -EINVAL;
3744a91eb52aSYuval Mintz 	}
3745a91eb52aSYuval Mintz 
3746a91eb52aSYuval Mintz 	return 0;
3747a91eb52aSYuval Mintz }
3748a91eb52aSYuval Mintz 
3749a91eb52aSYuval Mintz static int
3750a91eb52aSYuval Mintz qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
3751a91eb52aSYuval Mintz {
3752a91eb52aSYuval Mintz 	void *p_virt = NULL, *p_virt_prev = NULL;
3753a91eb52aSYuval Mintz 	dma_addr_t p_phys = 0;
3754a91eb52aSYuval Mintz 	u32 i;
3755a91eb52aSYuval Mintz 
3756a91eb52aSYuval Mintz 	for (i = 0; i < p_chain->page_cnt; i++) {
3757a91eb52aSYuval Mintz 		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
3758a91eb52aSYuval Mintz 					    QED_CHAIN_PAGE_SIZE,
3759a91eb52aSYuval Mintz 					    &p_phys, GFP_KERNEL);
37602591c280SJoe Perches 		if (!p_virt)
3761a91eb52aSYuval Mintz 			return -ENOMEM;
3762a91eb52aSYuval Mintz 
3763a91eb52aSYuval Mintz 		if (i == 0) {
3764a91eb52aSYuval Mintz 			qed_chain_init_mem(p_chain, p_virt, p_phys);
3765a91eb52aSYuval Mintz 			qed_chain_reset(p_chain);
3766a91eb52aSYuval Mintz 		} else {
3767a91eb52aSYuval Mintz 			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
3768a91eb52aSYuval Mintz 						     p_virt, p_phys);
3769a91eb52aSYuval Mintz 		}
3770a91eb52aSYuval Mintz 
3771a91eb52aSYuval Mintz 		p_virt_prev = p_virt;
3772a91eb52aSYuval Mintz 	}
3773a91eb52aSYuval Mintz 	/* Last page's next element should point to the beginning of the
3774a91eb52aSYuval Mintz 	 * chain.
3775a91eb52aSYuval Mintz 	 */
3776a91eb52aSYuval Mintz 	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
3777a91eb52aSYuval Mintz 				     p_chain->p_virt_addr,
3778a91eb52aSYuval Mintz 				     p_chain->p_phys_addr);
3779a91eb52aSYuval Mintz 
3780a91eb52aSYuval Mintz 	return 0;
3781a91eb52aSYuval Mintz }
3782a91eb52aSYuval Mintz 
3783a91eb52aSYuval Mintz static int
3784a91eb52aSYuval Mintz qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
3785a91eb52aSYuval Mintz {
3786a91eb52aSYuval Mintz 	dma_addr_t p_phys = 0;
3787a91eb52aSYuval Mintz 	void *p_virt = NULL;
3788a91eb52aSYuval Mintz 
3789a91eb52aSYuval Mintz 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
3790a91eb52aSYuval Mintz 				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
37912591c280SJoe Perches 	if (!p_virt)
3792a91eb52aSYuval Mintz 		return -ENOMEM;
3793a91eb52aSYuval Mintz 
3794a91eb52aSYuval Mintz 	qed_chain_init_mem(p_chain, p_virt, p_phys);
3795a91eb52aSYuval Mintz 	qed_chain_reset(p_chain);
3796a91eb52aSYuval Mintz 
3797a91eb52aSYuval Mintz 	return 0;
3798a91eb52aSYuval Mintz }
3799a91eb52aSYuval Mintz 
38001a4a6975SMintz, Yuval static int
38011a4a6975SMintz, Yuval qed_chain_alloc_pbl(struct qed_dev *cdev,
38021a4a6975SMintz, Yuval 		    struct qed_chain *p_chain,
38031a4a6975SMintz, Yuval 		    struct qed_chain_ext_pbl *ext_pbl)
3804a91eb52aSYuval Mintz {
3805a91eb52aSYuval Mintz 	u32 page_cnt = p_chain->page_cnt, size, i;
3806a91eb52aSYuval Mintz 	dma_addr_t p_phys = 0, p_pbl_phys = 0;
3807a91eb52aSYuval Mintz 	void **pp_virt_addr_tbl = NULL;
3808a91eb52aSYuval Mintz 	u8 *p_pbl_virt = NULL;
3809a91eb52aSYuval Mintz 	void *p_virt = NULL;
3810a91eb52aSYuval Mintz 
3811a91eb52aSYuval Mintz 	size = page_cnt * sizeof(*pp_virt_addr_tbl);
38122591c280SJoe Perches 	pp_virt_addr_tbl = vzalloc(size);
38132591c280SJoe Perches 	if (!pp_virt_addr_tbl)
3814a91eb52aSYuval Mintz 		return -ENOMEM;
3815a91eb52aSYuval Mintz 
3816a91eb52aSYuval Mintz 	/* The allocation of the PBL table is done with its full size, since it
3817a91eb52aSYuval Mintz 	 * is expected to be contiguous.
3818a91eb52aSYuval Mintz 	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
3819a91eb52aSYuval Mintz 	 * failure, since pp_virt_addr_tbl was previously allocated, and it
3820a91eb52aSYuval Mintz 	 * should be saved to allow its freeing during the error flow.
3821a91eb52aSYuval Mintz 	 */
3822a91eb52aSYuval Mintz 	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
38231a4a6975SMintz, Yuval 
38241a4a6975SMintz, Yuval 	if (!ext_pbl) {
3825a91eb52aSYuval Mintz 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
3826a91eb52aSYuval Mintz 						size, &p_pbl_phys, GFP_KERNEL);
38271a4a6975SMintz, Yuval 	} else {
38281a4a6975SMintz, Yuval 		p_pbl_virt = ext_pbl->p_pbl_virt;
38291a4a6975SMintz, Yuval 		p_pbl_phys = ext_pbl->p_pbl_phys;
38301a4a6975SMintz, Yuval 		p_chain->b_external_pbl = true;
38311a4a6975SMintz, Yuval 	}
38321a4a6975SMintz, Yuval 
3833a91eb52aSYuval Mintz 	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
3834a91eb52aSYuval Mintz 			       pp_virt_addr_tbl);
38352591c280SJoe Perches 	if (!p_pbl_virt)
3836a91eb52aSYuval Mintz 		return -ENOMEM;
3837a91eb52aSYuval Mintz 
3838a91eb52aSYuval Mintz 	for (i = 0; i < page_cnt; i++) {
3839a91eb52aSYuval Mintz 		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
3840a91eb52aSYuval Mintz 					    QED_CHAIN_PAGE_SIZE,
3841a91eb52aSYuval Mintz 					    &p_phys, GFP_KERNEL);
38422591c280SJoe Perches 		if (!p_virt)
3843a91eb52aSYuval Mintz 			return -ENOMEM;
3844a91eb52aSYuval Mintz 
3845a91eb52aSYuval Mintz 		if (i == 0) {
3846a91eb52aSYuval Mintz 			qed_chain_init_mem(p_chain, p_virt, p_phys);
3847a91eb52aSYuval Mintz 			qed_chain_reset(p_chain);
3848a91eb52aSYuval Mintz 		}
3849a91eb52aSYuval Mintz 
3850a91eb52aSYuval Mintz 		/* Fill the PBL table with the physical address of the page */
3851a91eb52aSYuval Mintz 		*(dma_addr_t *)p_pbl_virt = p_phys;
3852a91eb52aSYuval Mintz 		/* Keep the virtual address of the page */
3853a91eb52aSYuval Mintz 		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
3854a91eb52aSYuval Mintz 
3855a91eb52aSYuval Mintz 		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
3856a91eb52aSYuval Mintz 	}
3857a91eb52aSYuval Mintz 
3858a91eb52aSYuval Mintz 	return 0;
3859a91eb52aSYuval Mintz }
3860a91eb52aSYuval Mintz 
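/* A typical (illustrative) caller allocates, for example, a single-page u16
 * chain with
 *	qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_SINGLE,
 *			QED_CHAIN_CNT_TYPE_U16, num_elems, elem_size,
 *			&chain, NULL);
 * and releases it with qed_chain_free(cdev, &chain); the use mode and
 * element parameters above are hypothetical.
 */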
3861fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev,
3862fe56b9e6SYuval Mintz 		    enum qed_chain_use_mode intended_use,
3863fe56b9e6SYuval Mintz 		    enum qed_chain_mode mode,
3864a91eb52aSYuval Mintz 		    enum qed_chain_cnt_type cnt_type,
38651a4a6975SMintz, Yuval 		    u32 num_elems,
38661a4a6975SMintz, Yuval 		    size_t elem_size,
38671a4a6975SMintz, Yuval 		    struct qed_chain *p_chain,
38681a4a6975SMintz, Yuval 		    struct qed_chain_ext_pbl *ext_pbl)
3869fe56b9e6SYuval Mintz {
3870a91eb52aSYuval Mintz 	u32 page_cnt;
3871a91eb52aSYuval Mintz 	int rc = 0;
3872fe56b9e6SYuval Mintz 
3873fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_SINGLE)
3874fe56b9e6SYuval Mintz 		page_cnt = 1;
3875fe56b9e6SYuval Mintz 	else
3876fe56b9e6SYuval Mintz 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
3877fe56b9e6SYuval Mintz 
3878a91eb52aSYuval Mintz 	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
3879a91eb52aSYuval Mintz 	if (rc) {
3880a91eb52aSYuval Mintz 		DP_NOTICE(cdev,
38812591c280SJoe Perches 			  "Cannot allocate a chain with the given arguments:\n");
38822591c280SJoe Perches 		DP_NOTICE(cdev,
3883a91eb52aSYuval Mintz 			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
3884a91eb52aSYuval Mintz 			  intended_use, mode, cnt_type, num_elems, elem_size);
3885a91eb52aSYuval Mintz 		return rc;
3886fe56b9e6SYuval Mintz 	}
3887fe56b9e6SYuval Mintz 
3888a91eb52aSYuval Mintz 	qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
3889a91eb52aSYuval Mintz 			      mode, cnt_type);
3890fe56b9e6SYuval Mintz 
3891a91eb52aSYuval Mintz 	switch (mode) {
3892a91eb52aSYuval Mintz 	case QED_CHAIN_MODE_NEXT_PTR:
3893a91eb52aSYuval Mintz 		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
3894a91eb52aSYuval Mintz 		break;
3895a91eb52aSYuval Mintz 	case QED_CHAIN_MODE_SINGLE:
3896a91eb52aSYuval Mintz 		rc = qed_chain_alloc_single(cdev, p_chain);
3897a91eb52aSYuval Mintz 		break;
3898a91eb52aSYuval Mintz 	case QED_CHAIN_MODE_PBL:
38991a4a6975SMintz, Yuval 		rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
3900a91eb52aSYuval Mintz 		break;
3901fe56b9e6SYuval Mintz 	}
3902a91eb52aSYuval Mintz 	if (rc)
3903a91eb52aSYuval Mintz 		goto nomem;
3904fe56b9e6SYuval Mintz 
3905fe56b9e6SYuval Mintz 	return 0;
3906fe56b9e6SYuval Mintz 
3907fe56b9e6SYuval Mintz nomem:
3908a91eb52aSYuval Mintz 	qed_chain_free(cdev, p_chain);
3909a91eb52aSYuval Mintz 	return rc;
3910fe56b9e6SYuval Mintz }
3911fe56b9e6SYuval Mintz 
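/* Illustration (hypothetical resource layout): with
 * RESC_START(p_hwfn, QED_L2_QUEUE) = 16 and RESC_NUM(p_hwfn, QED_L2_QUEUE) = 8,
 * relative l2_queue ids 0..7 map to absolute ids 16..23, while any other
 * src_id is rejected with -EINVAL.
 */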
3912a91eb52aSYuval Mintz int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
3913cee4d264SManish Chopra {
3914cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
3915cee4d264SManish Chopra 		u16 min, max;
3916cee4d264SManish Chopra 
3917cee4d264SManish Chopra 		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
3918cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
3919cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
3920cee4d264SManish Chopra 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
3921cee4d264SManish Chopra 			  src_id, min, max);
3922cee4d264SManish Chopra 
3923cee4d264SManish Chopra 		return -EINVAL;
3924cee4d264SManish Chopra 	}
3925cee4d264SManish Chopra 
3926cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
3927cee4d264SManish Chopra 
3928cee4d264SManish Chopra 	return 0;
3929cee4d264SManish Chopra }
3930cee4d264SManish Chopra 
39311a635e48SYuval Mintz int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
3932cee4d264SManish Chopra {
3933cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
3934cee4d264SManish Chopra 		u8 min, max;
3935cee4d264SManish Chopra 
3936cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
3937cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
3938cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
3939cee4d264SManish Chopra 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
3940cee4d264SManish Chopra 			  src_id, min, max);
3941cee4d264SManish Chopra 
3942cee4d264SManish Chopra 		return -EINVAL;
3943cee4d264SManish Chopra 	}
3944cee4d264SManish Chopra 
3945cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
3946cee4d264SManish Chopra 
3947cee4d264SManish Chopra 	return 0;
3948cee4d264SManish Chopra }
3949cee4d264SManish Chopra 
39501a635e48SYuval Mintz int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
3951cee4d264SManish Chopra {
3952cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
3953cee4d264SManish Chopra 		u8 min, max;
3954cee4d264SManish Chopra 
3955cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
3956cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
3957cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
3958cee4d264SManish Chopra 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
3959cee4d264SManish Chopra 			  src_id, min, max);
3960cee4d264SManish Chopra 
3961cee4d264SManish Chopra 		return -EINVAL;
3962cee4d264SManish Chopra 	}
3963cee4d264SManish Chopra 
3964cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
3965cee4d264SManish Chopra 
3966cee4d264SManish Chopra 	return 0;
3967cee4d264SManish Chopra }
3968bcd197c8SManish Chopra 
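/* Illustration: for a (hypothetical) MAC of 00:11:22:33:44:55 the filter is
 * encoded as high = 0x0011 and low = 0x22334455, i.e. the two most
 * significant MAC bytes go into 'high' and the remaining four into 'low'.
 */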
39690a7fb11cSYuval Mintz static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
39700a7fb11cSYuval Mintz 				  u8 *p_filter)
39710a7fb11cSYuval Mintz {
39720a7fb11cSYuval Mintz 	*p_high = p_filter[1] | (p_filter[0] << 8);
39730a7fb11cSYuval Mintz 	*p_low = p_filter[5] | (p_filter[4] << 8) |
39740a7fb11cSYuval Mintz 		 (p_filter[3] << 16) | (p_filter[2] << 24);
39750a7fb11cSYuval Mintz }
39760a7fb11cSYuval Mintz 
39770a7fb11cSYuval Mintz int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
39780a7fb11cSYuval Mintz 			   struct qed_ptt *p_ptt, u8 *p_filter)
39790a7fb11cSYuval Mintz {
39800a7fb11cSYuval Mintz 	u32 high = 0, low = 0, en;
39810a7fb11cSYuval Mintz 	int i;
39820a7fb11cSYuval Mintz 
39830bc5fe85SSudarsana Reddy Kalluru 	if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
39840a7fb11cSYuval Mintz 		return 0;
39850a7fb11cSYuval Mintz 
39860a7fb11cSYuval Mintz 	qed_llh_mac_to_filter(&high, &low, p_filter);
39870a7fb11cSYuval Mintz 
39880a7fb11cSYuval Mintz 	/* Find a free entry and utilize it */
39890a7fb11cSYuval Mintz 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
39900a7fb11cSYuval Mintz 		en = qed_rd(p_hwfn, p_ptt,
39910a7fb11cSYuval Mintz 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
39920a7fb11cSYuval Mintz 		if (en)
39930a7fb11cSYuval Mintz 			continue;
39940a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
39950a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
39960a7fb11cSYuval Mintz 		       2 * i * sizeof(u32), low);
39970a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
39980a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
39990a7fb11cSYuval Mintz 		       (2 * i + 1) * sizeof(u32), high);
40000a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
40010a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
40020a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
40030a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
40040a7fb11cSYuval Mintz 		       i * sizeof(u32), 0);
40050a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
40060a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
40070a7fb11cSYuval Mintz 		break;
40080a7fb11cSYuval Mintz 	}
40090a7fb11cSYuval Mintz 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
40100a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn,
40110a7fb11cSYuval Mintz 			  "Failed to find an empty LLH filter to utilize\n");
40120a7fb11cSYuval Mintz 		return -EINVAL;
40130a7fb11cSYuval Mintz 	}
40140a7fb11cSYuval Mintz 
40150a7fb11cSYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
40160a7fb11cSYuval Mintz 		   "mac: %pM is added at %d\n",
40170a7fb11cSYuval Mintz 		   p_filter, i);
40180a7fb11cSYuval Mintz 
40190a7fb11cSYuval Mintz 	return 0;
40200a7fb11cSYuval Mintz }
40210a7fb11cSYuval Mintz 
40220a7fb11cSYuval Mintz void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
40230a7fb11cSYuval Mintz 			       struct qed_ptt *p_ptt, u8 *p_filter)
40240a7fb11cSYuval Mintz {
40250a7fb11cSYuval Mintz 	u32 high = 0, low = 0;
40260a7fb11cSYuval Mintz 	int i;
40270a7fb11cSYuval Mintz 
40280bc5fe85SSudarsana Reddy Kalluru 	if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
40290a7fb11cSYuval Mintz 		return;
40300a7fb11cSYuval Mintz 
40310a7fb11cSYuval Mintz 	qed_llh_mac_to_filter(&high, &low, p_filter);
40320a7fb11cSYuval Mintz 
40330a7fb11cSYuval Mintz 	/* Find the entry and clean it */
40340a7fb11cSYuval Mintz 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
40350a7fb11cSYuval Mintz 		if (qed_rd(p_hwfn, p_ptt,
40360a7fb11cSYuval Mintz 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
40370a7fb11cSYuval Mintz 			   2 * i * sizeof(u32)) != low)
40380a7fb11cSYuval Mintz 			continue;
40390a7fb11cSYuval Mintz 		if (qed_rd(p_hwfn, p_ptt,
40400a7fb11cSYuval Mintz 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
40410a7fb11cSYuval Mintz 			   (2 * i + 1) * sizeof(u32)) != high)
40420a7fb11cSYuval Mintz 			continue;
40430a7fb11cSYuval Mintz 
40440a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
40450a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
40460a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
40470a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
40480a7fb11cSYuval Mintz 		qed_wr(p_hwfn, p_ptt,
40490a7fb11cSYuval Mintz 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
40500a7fb11cSYuval Mintz 		       (2 * i + 1) * sizeof(u32), 0);
40510a7fb11cSYuval Mintz 
40520a7fb11cSYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
40530a7fb11cSYuval Mintz 			   "mac: %pM is removed from %d\n",
40540a7fb11cSYuval Mintz 			   p_filter, i);
40550a7fb11cSYuval Mintz 		break;
40560a7fb11cSYuval Mintz 	}
40570a7fb11cSYuval Mintz 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
40580a7fb11cSYuval Mintz 		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
40590a7fb11cSYuval Mintz }
40600a7fb11cSYuval Mintz 
40611e128c81SArun Easi int
40621e128c81SArun Easi qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
40631e128c81SArun Easi 			    struct qed_ptt *p_ptt,
40641e128c81SArun Easi 			    u16 source_port_or_eth_type,
40651e128c81SArun Easi 			    u16 dest_port, enum qed_llh_port_filter_type_t type)
40661e128c81SArun Easi {
40671e128c81SArun Easi 	u32 high = 0, low = 0, en;
40681e128c81SArun Easi 	int i;
40691e128c81SArun Easi 
40700bc5fe85SSudarsana Reddy Kalluru 	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
40711e128c81SArun Easi 		return 0;
40721e128c81SArun Easi 
40731e128c81SArun Easi 	switch (type) {
40741e128c81SArun Easi 	case QED_LLH_FILTER_ETHERTYPE:
40751e128c81SArun Easi 		high = source_port_or_eth_type;
40761e128c81SArun Easi 		break;
40771e128c81SArun Easi 	case QED_LLH_FILTER_TCP_SRC_PORT:
40781e128c81SArun Easi 	case QED_LLH_FILTER_UDP_SRC_PORT:
40791e128c81SArun Easi 		low = source_port_or_eth_type << 16;
40801e128c81SArun Easi 		break;
40811e128c81SArun Easi 	case QED_LLH_FILTER_TCP_DEST_PORT:
40821e128c81SArun Easi 	case QED_LLH_FILTER_UDP_DEST_PORT:
40831e128c81SArun Easi 		low = dest_port;
40841e128c81SArun Easi 		break;
40851e128c81SArun Easi 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
40861e128c81SArun Easi 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
40871e128c81SArun Easi 		low = (source_port_or_eth_type << 16) | dest_port;
40881e128c81SArun Easi 		break;
40891e128c81SArun Easi 	default:
40901e128c81SArun Easi 		DP_NOTICE(p_hwfn,
40911e128c81SArun Easi 			  "Invalid LLH protocol filter type %d\n", type);
40921e128c81SArun Easi 		return -EINVAL;
40931e128c81SArun Easi 	}
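	/* Illustration (hypothetical ports): a
	 * QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT filter for source port 0xc000
	 * and destination port 0x0cbc is encoded above as low = 0xc0000cbc
	 * with high left at 0.
	 */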
40941e128c81SArun Easi 	/* Find a free entry and utilize it */
40951e128c81SArun Easi 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
40961e128c81SArun Easi 		en = qed_rd(p_hwfn, p_ptt,
40971e128c81SArun Easi 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
40981e128c81SArun Easi 		if (en)
40991e128c81SArun Easi 			continue;
41001e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
41011e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
41021e128c81SArun Easi 		       2 * i * sizeof(u32), low);
41031e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
41041e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
41051e128c81SArun Easi 		       (2 * i + 1) * sizeof(u32), high);
41061e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
41071e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
41081e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
41091e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
41101e128c81SArun Easi 		       i * sizeof(u32), 1 << type);
41111e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
41121e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
41131e128c81SArun Easi 		break;
41141e128c81SArun Easi 	}
41151e128c81SArun Easi 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
41161e128c81SArun Easi 		DP_NOTICE(p_hwfn,
41171e128c81SArun Easi 			  "Failed to find an empty LLH filter to utilize\n");
41181e128c81SArun Easi 		return -EINVAL;
41191e128c81SArun Easi 	}
41201e128c81SArun Easi 	switch (type) {
41211e128c81SArun Easi 	case QED_LLH_FILTER_ETHERTYPE:
41221e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41231e128c81SArun Easi 			   "ETH type %x is added at %d\n",
41241e128c81SArun Easi 			   source_port_or_eth_type, i);
41251e128c81SArun Easi 		break;
41261e128c81SArun Easi 	case QED_LLH_FILTER_TCP_SRC_PORT:
41271e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41281e128c81SArun Easi 			   "TCP src port %x is added at %d\n",
41291e128c81SArun Easi 			   source_port_or_eth_type, i);
41301e128c81SArun Easi 		break;
41311e128c81SArun Easi 	case QED_LLH_FILTER_UDP_SRC_PORT:
41321e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41331e128c81SArun Easi 			   "UDP src port %x is added at %d\n",
41341e128c81SArun Easi 			   source_port_or_eth_type, i);
41351e128c81SArun Easi 		break;
41361e128c81SArun Easi 	case QED_LLH_FILTER_TCP_DEST_PORT:
41371e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41381e128c81SArun Easi 			   "TCP dst port %x is added at %d\n", dest_port, i);
41391e128c81SArun Easi 		break;
41401e128c81SArun Easi 	case QED_LLH_FILTER_UDP_DEST_PORT:
41411e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41421e128c81SArun Easi 			   "UDP dst port %x is added at %d\n", dest_port, i);
41431e128c81SArun Easi 		break;
41441e128c81SArun Easi 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
41451e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41461e128c81SArun Easi 			   "TCP src/dst ports %x/%x are added at %d\n",
41471e128c81SArun Easi 			   source_port_or_eth_type, dest_port, i);
41481e128c81SArun Easi 		break;
41491e128c81SArun Easi 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
41501e128c81SArun Easi 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
41511e128c81SArun Easi 			   "UDP src/dst ports %x/%x are added at %d\n",
41521e128c81SArun Easi 			   source_port_or_eth_type, dest_port, i);
41531e128c81SArun Easi 		break;
41541e128c81SArun Easi 	}
41551e128c81SArun Easi 	return 0;
41561e128c81SArun Easi }
41571e128c81SArun Easi 
41581e128c81SArun Easi void
41591e128c81SArun Easi qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
41601e128c81SArun Easi 			       struct qed_ptt *p_ptt,
41611e128c81SArun Easi 			       u16 source_port_or_eth_type,
41621e128c81SArun Easi 			       u16 dest_port,
41631e128c81SArun Easi 			       enum qed_llh_port_filter_type_t type)
41641e128c81SArun Easi {
41651e128c81SArun Easi 	u32 high = 0, low = 0;
41661e128c81SArun Easi 	int i;
41671e128c81SArun Easi 
41680bc5fe85SSudarsana Reddy Kalluru 	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
41691e128c81SArun Easi 		return;
41701e128c81SArun Easi 
41711e128c81SArun Easi 	switch (type) {
41721e128c81SArun Easi 	case QED_LLH_FILTER_ETHERTYPE:
41731e128c81SArun Easi 		high = source_port_or_eth_type;
41741e128c81SArun Easi 		break;
41751e128c81SArun Easi 	case QED_LLH_FILTER_TCP_SRC_PORT:
41761e128c81SArun Easi 	case QED_LLH_FILTER_UDP_SRC_PORT:
41771e128c81SArun Easi 		low = source_port_or_eth_type << 16;
41781e128c81SArun Easi 		break;
41791e128c81SArun Easi 	case QED_LLH_FILTER_TCP_DEST_PORT:
41801e128c81SArun Easi 	case QED_LLH_FILTER_UDP_DEST_PORT:
41811e128c81SArun Easi 		low = dest_port;
41821e128c81SArun Easi 		break;
41831e128c81SArun Easi 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
41841e128c81SArun Easi 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
41851e128c81SArun Easi 		low = (source_port_or_eth_type << 16) | dest_port;
41861e128c81SArun Easi 		break;
41871e128c81SArun Easi 	default:
41881e128c81SArun Easi 		DP_NOTICE(p_hwfn,
41891e128c81SArun Easi 			  "Invalid LLH protocol filter type %d\n", type);
41901e128c81SArun Easi 		return;
41911e128c81SArun Easi 	}
41921e128c81SArun Easi 
41931e128c81SArun Easi 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
41941e128c81SArun Easi 		if (!qed_rd(p_hwfn, p_ptt,
41951e128c81SArun Easi 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
41961e128c81SArun Easi 			continue;
41971e128c81SArun Easi 		if (!qed_rd(p_hwfn, p_ptt,
41981e128c81SArun Easi 			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
41991e128c81SArun Easi 			continue;
42001e128c81SArun Easi 		if (!(qed_rd(p_hwfn, p_ptt,
42011e128c81SArun Easi 			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
42021e128c81SArun Easi 			     i * sizeof(u32)) & BIT(type)))
42031e128c81SArun Easi 			continue;
42041e128c81SArun Easi 		if (qed_rd(p_hwfn, p_ptt,
42051e128c81SArun Easi 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
42061e128c81SArun Easi 			   2 * i * sizeof(u32)) != low)
42071e128c81SArun Easi 			continue;
42081e128c81SArun Easi 		if (qed_rd(p_hwfn, p_ptt,
42091e128c81SArun Easi 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
42101e128c81SArun Easi 			   (2 * i + 1) * sizeof(u32)) != high)
42111e128c81SArun Easi 			continue;
42121e128c81SArun Easi 
42131e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
42141e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
42151e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
42161e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
42171e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
42181e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
42191e128c81SArun Easi 		       i * sizeof(u32), 0);
42201e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
42211e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
42221e128c81SArun Easi 		qed_wr(p_hwfn, p_ptt,
42231e128c81SArun Easi 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
42241e128c81SArun Easi 		       (2 * i + 1) * sizeof(u32), 0);
42251e128c81SArun Easi 		break;
42261e128c81SArun Easi 	}
42271e128c81SArun Easi 
42281e128c81SArun Easi 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
42291e128c81SArun Easi 		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
42301e128c81SArun Easi }
42311e128c81SArun Easi 
4232722003acSSudarsana Reddy Kalluru static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4233722003acSSudarsana Reddy Kalluru 			    u32 hw_addr, void *p_eth_qzone,
4234722003acSSudarsana Reddy Kalluru 			    size_t eth_qzone_size, u8 timeset)
4235722003acSSudarsana Reddy Kalluru {
4236722003acSSudarsana Reddy Kalluru 	struct coalescing_timeset *p_coal_timeset;
4237722003acSSudarsana Reddy Kalluru 
4238722003acSSudarsana Reddy Kalluru 	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
4239722003acSSudarsana Reddy Kalluru 		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
4240722003acSSudarsana Reddy Kalluru 		return -EINVAL;
4241722003acSSudarsana Reddy Kalluru 	}
4242722003acSSudarsana Reddy Kalluru 
4243722003acSSudarsana Reddy Kalluru 	p_coal_timeset = p_eth_qzone;
4244477f2d14SRahul Verma 	memset(p_eth_qzone, 0, eth_qzone_size);
4245722003acSSudarsana Reddy Kalluru 	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
4246722003acSSudarsana Reddy Kalluru 	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
4247722003acSSudarsana Reddy Kalluru 	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
4248722003acSSudarsana Reddy Kalluru 
4249722003acSSudarsana Reddy Kalluru 	return 0;
4250722003acSSudarsana Reddy Kalluru }
4251722003acSSudarsana Reddy Kalluru 
4252477f2d14SRahul Verma int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle)
4253477f2d14SRahul Verma {
4254477f2d14SRahul Verma 	struct qed_queue_cid *p_cid = p_handle;
4255477f2d14SRahul Verma 	struct qed_hwfn *p_hwfn;
4256477f2d14SRahul Verma 	struct qed_ptt *p_ptt;
4257477f2d14SRahul Verma 	int rc = 0;
4258477f2d14SRahul Verma 
4259477f2d14SRahul Verma 	p_hwfn = p_cid->p_owner;
4260477f2d14SRahul Verma 
4261477f2d14SRahul Verma 	if (IS_VF(p_hwfn->cdev))
4262477f2d14SRahul Verma 		return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid);
4263477f2d14SRahul Verma 
4264477f2d14SRahul Verma 	p_ptt = qed_ptt_acquire(p_hwfn);
4265477f2d14SRahul Verma 	if (!p_ptt)
4266477f2d14SRahul Verma 		return -EAGAIN;
4267477f2d14SRahul Verma 
4268477f2d14SRahul Verma 	if (rx_coal) {
4269477f2d14SRahul Verma 		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
4270477f2d14SRahul Verma 		if (rc)
4271477f2d14SRahul Verma 			goto out;
4272477f2d14SRahul Verma 		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
4273477f2d14SRahul Verma 	}
4274477f2d14SRahul Verma 
4275477f2d14SRahul Verma 	if (tx_coal) {
4276477f2d14SRahul Verma 		rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
4277477f2d14SRahul Verma 		if (rc)
4278477f2d14SRahul Verma 			goto out;
4279477f2d14SRahul Verma 		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
4280477f2d14SRahul Verma 	}
4281477f2d14SRahul Verma out:
4282477f2d14SRahul Verma 	qed_ptt_release(p_hwfn, p_ptt);
4283477f2d14SRahul Verma 	return rc;
4284477f2d14SRahul Verma }
4285477f2d14SRahul Verma 
4286477f2d14SRahul Verma int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
4287477f2d14SRahul Verma 			 struct qed_ptt *p_ptt,
4288477f2d14SRahul Verma 			 u16 coalesce, struct qed_queue_cid *p_cid)
4289722003acSSudarsana Reddy Kalluru {
4290722003acSSudarsana Reddy Kalluru 	struct ustorm_eth_queue_zone eth_qzone;
4291722003acSSudarsana Reddy Kalluru 	u8 timeset, timer_res;
4292722003acSSudarsana Reddy Kalluru 	u32 address;
4293722003acSSudarsana Reddy Kalluru 	int rc;
4294722003acSSudarsana Reddy Kalluru 
4295722003acSSudarsana Reddy Kalluru 	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
4296722003acSSudarsana Reddy Kalluru 	if (coalesce <= 0x7F) {
4297722003acSSudarsana Reddy Kalluru 		timer_res = 0;
4298722003acSSudarsana Reddy Kalluru 	} else if (coalesce <= 0xFF) {
4299722003acSSudarsana Reddy Kalluru 		timer_res = 1;
4300722003acSSudarsana Reddy Kalluru 	} else if (coalesce <= 0x1FF) {
4301722003acSSudarsana Reddy Kalluru 		timer_res = 2;
4302722003acSSudarsana Reddy Kalluru 	} else {
4303722003acSSudarsana Reddy Kalluru 		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
4304722003acSSudarsana Reddy Kalluru 		return -EINVAL;
4305722003acSSudarsana Reddy Kalluru 	}
4306722003acSSudarsana Reddy Kalluru 	timeset = (u8)(coalesce >> timer_res);
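	/* For example, coalesce = 200 usec exceeds 0x7F but fits in 0xFF,
	 * so timer_res = 1 and timeset = 200 >> 1 = 100.
	 */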
4307722003acSSudarsana Reddy Kalluru 
4308477f2d14SRahul Verma 	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
4309477f2d14SRahul Verma 				   p_cid->sb_igu_id, false);
4310722003acSSudarsana Reddy Kalluru 	if (rc)
4311722003acSSudarsana Reddy Kalluru 		goto out;
4312722003acSSudarsana Reddy Kalluru 
4313477f2d14SRahul Verma 	address = BAR0_MAP_REG_USDM_RAM +
4314477f2d14SRahul Verma 		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
4315722003acSSudarsana Reddy Kalluru 
4316722003acSSudarsana Reddy Kalluru 	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
4317722003acSSudarsana Reddy Kalluru 			      sizeof(struct ustorm_eth_queue_zone), timeset);
4320722003acSSudarsana Reddy Kalluru 
4321722003acSSudarsana Reddy Kalluru out:
4322722003acSSudarsana Reddy Kalluru 	return rc;
4323722003acSSudarsana Reddy Kalluru }
4324722003acSSudarsana Reddy Kalluru 
4325477f2d14SRahul Verma int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
4326477f2d14SRahul Verma 			 struct qed_ptt *p_ptt,
4327477f2d14SRahul Verma 			 u16 coalesce, struct qed_queue_cid *p_cid)
4328722003acSSudarsana Reddy Kalluru {
4329722003acSSudarsana Reddy Kalluru 	struct xstorm_eth_queue_zone eth_qzone;
4330722003acSSudarsana Reddy Kalluru 	u8 timeset, timer_res;
4331722003acSSudarsana Reddy Kalluru 	u32 address;
4332722003acSSudarsana Reddy Kalluru 	int rc;
4333722003acSSudarsana Reddy Kalluru 
	/* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
4335722003acSSudarsana Reddy Kalluru 	if (coalesce <= 0x7F) {
4336722003acSSudarsana Reddy Kalluru 		timer_res = 0;
4337722003acSSudarsana Reddy Kalluru 	} else if (coalesce <= 0xFF) {
4338722003acSSudarsana Reddy Kalluru 		timer_res = 1;
4339722003acSSudarsana Reddy Kalluru 	} else if (coalesce <= 0x1FF) {
4340722003acSSudarsana Reddy Kalluru 		timer_res = 2;
4341722003acSSudarsana Reddy Kalluru 	} else {
4342722003acSSudarsana Reddy Kalluru 		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
4343722003acSSudarsana Reddy Kalluru 		return -EINVAL;
4344722003acSSudarsana Reddy Kalluru 	}
4345722003acSSudarsana Reddy Kalluru 	timeset = (u8)(coalesce >> timer_res);
4346722003acSSudarsana Reddy Kalluru 
4347477f2d14SRahul Verma 	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
4348477f2d14SRahul Verma 				   p_cid->sb_igu_id, true);
4349722003acSSudarsana Reddy Kalluru 	if (rc)
4350722003acSSudarsana Reddy Kalluru 		goto out;
4351722003acSSudarsana Reddy Kalluru 
4352477f2d14SRahul Verma 	address = BAR0_MAP_REG_XSDM_RAM +
4353477f2d14SRahul Verma 		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
4354722003acSSudarsana Reddy Kalluru 
4355722003acSSudarsana Reddy Kalluru 	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
4356722003acSSudarsana Reddy Kalluru 			      sizeof(struct xstorm_eth_queue_zone), timeset);
4357722003acSSudarsana Reddy Kalluru out:
4358722003acSSudarsana Reddy Kalluru 	return rc;
4359722003acSSudarsana Reddy Kalluru }
4360722003acSSudarsana Reddy Kalluru 
/* Calculate the final WFQ values for all vports and configure them.
 * After this configuration each vport will have an approximate min rate of
 * min_pf_rate * (vport_wfq / QED_WFQ_UNIT).
 */
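/* For example, assuming QED_WFQ_UNIT is 100 (one-percent granularity), a vport
 * with min_speed = 2500 Mbps under min_pf_rate = 10000 Mbps gets
 * vport_wfq = (2500 * 100) / 10000 = 25, i.e. roughly a quarter of the PF rate.
 */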
4365bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
4366bcd197c8SManish Chopra 					     struct qed_ptt *p_ptt,
4367bcd197c8SManish Chopra 					     u32 min_pf_rate)
4368bcd197c8SManish Chopra {
4369bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
4370bcd197c8SManish Chopra 	int i;
4371bcd197c8SManish Chopra 
4372bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
4373bcd197c8SManish Chopra 
4374bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
4375bcd197c8SManish Chopra 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
4376bcd197c8SManish Chopra 
4377bcd197c8SManish Chopra 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
4378bcd197c8SManish Chopra 						min_pf_rate;
4379bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
4380bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
4381bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
4382bcd197c8SManish Chopra 	}
4383bcd197c8SManish Chopra }
4384bcd197c8SManish Chopra 
4385bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
4386bcd197c8SManish Chopra 				       u32 min_pf_rate)
4388bcd197c8SManish Chopra {
4389bcd197c8SManish Chopra 	int i;
4390bcd197c8SManish Chopra 
4391bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
4392bcd197c8SManish Chopra 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
4393bcd197c8SManish Chopra }
4394bcd197c8SManish Chopra 
4395bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
4396bcd197c8SManish Chopra 					   struct qed_ptt *p_ptt,
4397bcd197c8SManish Chopra 					   u32 min_pf_rate)
4398bcd197c8SManish Chopra {
4399bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
4400bcd197c8SManish Chopra 	int i;
4401bcd197c8SManish Chopra 
4402bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
4403bcd197c8SManish Chopra 
	/* Reset every vport to the default WFQ weight, then write it to HW */
	qed_init_wfq_default_param(p_hwfn, min_pf_rate);

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
4410bcd197c8SManish Chopra }
4411bcd197c8SManish Chopra 
/* This function performs several validations of the WFQ configuration and of
 * the requested min rate for a given vport:
 * 1. req_rate must be at least one percent of min_pf_rate.
 * 2. req_rate must not cause the rates of other vports [those not explicitly
 *    configured for WFQ] to drop below one percent of min_pf_rate.
 * 3. total_req_min_rate [the sum of all vports' min rates] must not exceed
 *    min_pf_rate.
 */
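/* For example, with min_pf_rate = 10000 Mbps and four vports, two of which
 * request 6000 and 3000 Mbps, the remaining 1000 Mbps is split between the two
 * non-requesting vports (500 Mbps each), which satisfies the one-percent
 * (100 Mbps) floor, so the configuration is accepted.
 */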
4419bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
44201a635e48SYuval Mintz 			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
4421bcd197c8SManish Chopra {
4422bcd197c8SManish Chopra 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
4423bcd197c8SManish Chopra 	int non_requested_count = 0, req_count = 0, i, num_vports;
4424bcd197c8SManish Chopra 
4425bcd197c8SManish Chopra 	num_vports = p_hwfn->qm_info.num_vports;
4426bcd197c8SManish Chopra 
4427bcd197c8SManish Chopra 	/* Accounting for the vports which are configured for WFQ explicitly */
4428bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
4429bcd197c8SManish Chopra 		u32 tmp_speed;
4430bcd197c8SManish Chopra 
4431bcd197c8SManish Chopra 		if ((i != vport_id) &&
4432bcd197c8SManish Chopra 		    p_hwfn->qm_info.wfq_data[i].configured) {
4433bcd197c8SManish Chopra 			req_count++;
4434bcd197c8SManish Chopra 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
4435bcd197c8SManish Chopra 			total_req_min_rate += tmp_speed;
4436bcd197c8SManish Chopra 		}
4437bcd197c8SManish Chopra 	}
4438bcd197c8SManish Chopra 
4439bcd197c8SManish Chopra 	/* Include current vport data as well */
4440bcd197c8SManish Chopra 	req_count++;
4441bcd197c8SManish Chopra 	total_req_min_rate += req_rate;
4442bcd197c8SManish Chopra 	non_requested_count = num_vports - req_count;
4443bcd197c8SManish Chopra 
4444bcd197c8SManish Chopra 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
4445bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4446bcd197c8SManish Chopra 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
4447bcd197c8SManish Chopra 			   vport_id, req_rate, min_pf_rate);
4448bcd197c8SManish Chopra 		return -EINVAL;
4449bcd197c8SManish Chopra 	}
4450bcd197c8SManish Chopra 
4451bcd197c8SManish Chopra 	if (num_vports > QED_WFQ_UNIT) {
4452bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4453bcd197c8SManish Chopra 			   "Number of vports is greater than %d\n",
4454bcd197c8SManish Chopra 			   QED_WFQ_UNIT);
4455bcd197c8SManish Chopra 		return -EINVAL;
4456bcd197c8SManish Chopra 	}
4457bcd197c8SManish Chopra 
4458bcd197c8SManish Chopra 	if (total_req_min_rate > min_pf_rate) {
4459bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4460bcd197c8SManish Chopra 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
4461bcd197c8SManish Chopra 			   total_req_min_rate, min_pf_rate);
4462bcd197c8SManish Chopra 		return -EINVAL;
4463bcd197c8SManish Chopra 	}
4464bcd197c8SManish Chopra 
4465bcd197c8SManish Chopra 	total_left_rate	= min_pf_rate - total_req_min_rate;
4466bcd197c8SManish Chopra 
4467bcd197c8SManish Chopra 	left_rate_per_vp = total_left_rate / non_requested_count;
4468bcd197c8SManish Chopra 	if (left_rate_per_vp <  min_pf_rate / QED_WFQ_UNIT) {
4469bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4470bcd197c8SManish Chopra 			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
4471bcd197c8SManish Chopra 			   left_rate_per_vp, min_pf_rate);
4472bcd197c8SManish Chopra 		return -EINVAL;
4473bcd197c8SManish Chopra 	}
4474bcd197c8SManish Chopra 
4475bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
4476bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
4477bcd197c8SManish Chopra 
4478bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
4479bcd197c8SManish Chopra 		if (p_hwfn->qm_info.wfq_data[i].configured)
4480bcd197c8SManish Chopra 			continue;
4481bcd197c8SManish Chopra 
4482bcd197c8SManish Chopra 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
4483bcd197c8SManish Chopra 	}
4484bcd197c8SManish Chopra 
4485bcd197c8SManish Chopra 	return 0;
4486bcd197c8SManish Chopra }
4487bcd197c8SManish Chopra 
4488733def6aSYuval Mintz static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
4489733def6aSYuval Mintz 				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
4490733def6aSYuval Mintz {
4491733def6aSYuval Mintz 	struct qed_mcp_link_state *p_link;
4492733def6aSYuval Mintz 	int rc = 0;
4493733def6aSYuval Mintz 
4494733def6aSYuval Mintz 	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
4495733def6aSYuval Mintz 
4496733def6aSYuval Mintz 	if (!p_link->min_pf_rate) {
4497733def6aSYuval Mintz 		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
4498733def6aSYuval Mintz 		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
4499733def6aSYuval Mintz 		return rc;
4500733def6aSYuval Mintz 	}
4501733def6aSYuval Mintz 
4502733def6aSYuval Mintz 	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
4503733def6aSYuval Mintz 
45041a635e48SYuval Mintz 	if (!rc)
4505733def6aSYuval Mintz 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
4506733def6aSYuval Mintz 						 p_link->min_pf_rate);
4507733def6aSYuval Mintz 	else
4508733def6aSYuval Mintz 		DP_NOTICE(p_hwfn,
4509733def6aSYuval Mintz 			  "Validation failed while configuring min rate\n");
4510733def6aSYuval Mintz 
4511733def6aSYuval Mintz 	return rc;
4512733def6aSYuval Mintz }
4513733def6aSYuval Mintz 
4514bcd197c8SManish Chopra static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
4515bcd197c8SManish Chopra 						 struct qed_ptt *p_ptt,
4516bcd197c8SManish Chopra 						 u32 min_pf_rate)
4517bcd197c8SManish Chopra {
4518bcd197c8SManish Chopra 	bool use_wfq = false;
4519bcd197c8SManish Chopra 	int rc = 0;
4520bcd197c8SManish Chopra 	u16 i;
4521bcd197c8SManish Chopra 
4522bcd197c8SManish Chopra 	/* Validate all pre configured vports for wfq */
4523bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
4524bcd197c8SManish Chopra 		u32 rate;
4525bcd197c8SManish Chopra 
4526bcd197c8SManish Chopra 		if (!p_hwfn->qm_info.wfq_data[i].configured)
4527bcd197c8SManish Chopra 			continue;
4528bcd197c8SManish Chopra 
4529bcd197c8SManish Chopra 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
4530bcd197c8SManish Chopra 		use_wfq = true;
4531bcd197c8SManish Chopra 
4532bcd197c8SManish Chopra 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
4533bcd197c8SManish Chopra 		if (rc) {
4534bcd197c8SManish Chopra 			DP_NOTICE(p_hwfn,
4535bcd197c8SManish Chopra 				  "WFQ validation failed while configuring min rate\n");
4536bcd197c8SManish Chopra 			break;
4537bcd197c8SManish Chopra 		}
4538bcd197c8SManish Chopra 	}
4539bcd197c8SManish Chopra 
4540bcd197c8SManish Chopra 	if (!rc && use_wfq)
4541bcd197c8SManish Chopra 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
4542bcd197c8SManish Chopra 	else
4543bcd197c8SManish Chopra 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
4544bcd197c8SManish Chopra 
4545bcd197c8SManish Chopra 	return rc;
4546bcd197c8SManish Chopra }
4547bcd197c8SManish Chopra 
/* Main API for qed clients to configure a vport's min rate.
 * vp_id - vport id within the PF, range [0..(total_num_vports_per_pf - 1)]
 * rate - speed in Mbps to be assigned to the given vport.
 */
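/* For example, qed_configure_vport_wfq(cdev, 2, 2500) requests a guaranteed
 * minimum of 2500 Mbps for vport 2 (illustrative values only).
 */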
4552733def6aSYuval Mintz int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
4553733def6aSYuval Mintz {
4554733def6aSYuval Mintz 	int i, rc = -EINVAL;
4555733def6aSYuval Mintz 
	/* Currently not supported; might change in the future */
4557733def6aSYuval Mintz 	if (cdev->num_hwfns > 1) {
4558733def6aSYuval Mintz 		DP_NOTICE(cdev,
4559733def6aSYuval Mintz 			  "WFQ configuration is not supported for this device\n");
4560733def6aSYuval Mintz 		return rc;
4561733def6aSYuval Mintz 	}
4562733def6aSYuval Mintz 
4563733def6aSYuval Mintz 	for_each_hwfn(cdev, i) {
4564733def6aSYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4565733def6aSYuval Mintz 		struct qed_ptt *p_ptt;
4566733def6aSYuval Mintz 
4567733def6aSYuval Mintz 		p_ptt = qed_ptt_acquire(p_hwfn);
4568733def6aSYuval Mintz 		if (!p_ptt)
4569733def6aSYuval Mintz 			return -EBUSY;
4570733def6aSYuval Mintz 
4571733def6aSYuval Mintz 		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
4572733def6aSYuval Mintz 
4573d572c430SYuval Mintz 		if (rc) {
4574733def6aSYuval Mintz 			qed_ptt_release(p_hwfn, p_ptt);
4575733def6aSYuval Mintz 			return rc;
4576733def6aSYuval Mintz 		}
4577733def6aSYuval Mintz 
4578733def6aSYuval Mintz 		qed_ptt_release(p_hwfn, p_ptt);
4579733def6aSYuval Mintz 	}
4580733def6aSYuval Mintz 
4581733def6aSYuval Mintz 	return rc;
4582733def6aSYuval Mintz }
4583733def6aSYuval Mintz 
/* API to (re)configure WFQ upon an MCP link-change notification */
45856f437d43SMintz, Yuval void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
45866f437d43SMintz, Yuval 					 struct qed_ptt *p_ptt, u32 min_pf_rate)
4587bcd197c8SManish Chopra {
4588bcd197c8SManish Chopra 	int i;
4589bcd197c8SManish Chopra 
45903e7cfce2SYuval Mintz 	if (cdev->num_hwfns > 1) {
45913e7cfce2SYuval Mintz 		DP_VERBOSE(cdev,
45923e7cfce2SYuval Mintz 			   NETIF_MSG_LINK,
45933e7cfce2SYuval Mintz 			   "WFQ configuration is not supported for this device\n");
45943e7cfce2SYuval Mintz 		return;
45953e7cfce2SYuval Mintz 	}
45963e7cfce2SYuval Mintz 
4597bcd197c8SManish Chopra 	for_each_hwfn(cdev, i) {
4598bcd197c8SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4599bcd197c8SManish Chopra 
46006f437d43SMintz, Yuval 		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
4601bcd197c8SManish Chopra 						      min_pf_rate);
4602bcd197c8SManish Chopra 	}
4603bcd197c8SManish Chopra }
46044b01e519SManish Chopra 
46054b01e519SManish Chopra int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
46064b01e519SManish Chopra 				     struct qed_ptt *p_ptt,
46074b01e519SManish Chopra 				     struct qed_mcp_link_state *p_link,
46084b01e519SManish Chopra 				     u8 max_bw)
46094b01e519SManish Chopra {
46104b01e519SManish Chopra 	int rc = 0;
46114b01e519SManish Chopra 
46124b01e519SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
46134b01e519SManish Chopra 
46144b01e519SManish Chopra 	if (!p_link->line_speed && (max_bw != 100))
46154b01e519SManish Chopra 		return rc;
46164b01e519SManish Chopra 
46174b01e519SManish Chopra 	p_link->speed = (p_link->line_speed * max_bw) / 100;
46184b01e519SManish Chopra 	p_hwfn->qm_info.pf_rl = p_link->speed;
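	/* For example, line_speed = 25000 Mbps with max_bw = 40 yields a PF
	 * rate limit of (25000 * 40) / 100 = 10000 Mbps.
	 */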
46194b01e519SManish Chopra 
	/* Since the rate limiter also affects Tx-switched traffic, we don't
	 * want it to throttle such traffic when no real limit is requested.
	 * In that case, set the limit to an arbitrarily high value.
	 */
46244b01e519SManish Chopra 	if (max_bw == 100)
46254b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
46264b01e519SManish Chopra 
46274b01e519SManish Chopra 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
46284b01e519SManish Chopra 			    p_hwfn->qm_info.pf_rl);
46294b01e519SManish Chopra 
46304b01e519SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
46314b01e519SManish Chopra 		   "Configured MAX bandwidth to be %08x Mb/sec\n",
46324b01e519SManish Chopra 		   p_link->speed);
46334b01e519SManish Chopra 
46344b01e519SManish Chopra 	return rc;
46354b01e519SManish Chopra }
46364b01e519SManish Chopra 
46374b01e519SManish Chopra /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
46384b01e519SManish Chopra int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
46394b01e519SManish Chopra {
46404b01e519SManish Chopra 	int i, rc = -EINVAL;
46414b01e519SManish Chopra 
46424b01e519SManish Chopra 	if (max_bw < 1 || max_bw > 100) {
46434b01e519SManish Chopra 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
46444b01e519SManish Chopra 		return rc;
46454b01e519SManish Chopra 	}
46464b01e519SManish Chopra 
46474b01e519SManish Chopra 	for_each_hwfn(cdev, i) {
46484b01e519SManish Chopra 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
46494b01e519SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
46504b01e519SManish Chopra 		struct qed_mcp_link_state *p_link;
46514b01e519SManish Chopra 		struct qed_ptt *p_ptt;
46524b01e519SManish Chopra 
46534b01e519SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
46544b01e519SManish Chopra 
46554b01e519SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
46564b01e519SManish Chopra 		if (!p_ptt)
46574b01e519SManish Chopra 			return -EBUSY;
46584b01e519SManish Chopra 
46594b01e519SManish Chopra 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
46604b01e519SManish Chopra 						      p_link, max_bw);
46614b01e519SManish Chopra 
46624b01e519SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
46634b01e519SManish Chopra 
46644b01e519SManish Chopra 		if (rc)
46654b01e519SManish Chopra 			break;
46664b01e519SManish Chopra 	}
46674b01e519SManish Chopra 
46684b01e519SManish Chopra 	return rc;
46694b01e519SManish Chopra }
4670a64b02d5SManish Chopra 
4671a64b02d5SManish Chopra int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
4672a64b02d5SManish Chopra 				     struct qed_ptt *p_ptt,
4673a64b02d5SManish Chopra 				     struct qed_mcp_link_state *p_link,
4674a64b02d5SManish Chopra 				     u8 min_bw)
4675a64b02d5SManish Chopra {
4676a64b02d5SManish Chopra 	int rc = 0;
4677a64b02d5SManish Chopra 
4678a64b02d5SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
4679a64b02d5SManish Chopra 	p_hwfn->qm_info.pf_wfq = min_bw;
4680a64b02d5SManish Chopra 
4681a64b02d5SManish Chopra 	if (!p_link->line_speed)
4682a64b02d5SManish Chopra 		return rc;
4683a64b02d5SManish Chopra 
4684a64b02d5SManish Chopra 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
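	/* For example, line_speed = 25000 Mbps with min_bw = 20 yields
	 * min_pf_rate = (25000 * 20) / 100 = 5000 Mbps.
	 */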
4685a64b02d5SManish Chopra 
4686a64b02d5SManish Chopra 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
4687a64b02d5SManish Chopra 
4688a64b02d5SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4689a64b02d5SManish Chopra 		   "Configured MIN bandwidth to be %d Mb/sec\n",
4690a64b02d5SManish Chopra 		   p_link->min_pf_rate);
4691a64b02d5SManish Chopra 
4692a64b02d5SManish Chopra 	return rc;
4693a64b02d5SManish Chopra }
4694a64b02d5SManish Chopra 
4695a64b02d5SManish Chopra /* Main API to configure PF min bandwidth where bw range is [1-100] */
4696a64b02d5SManish Chopra int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
4697a64b02d5SManish Chopra {
4698a64b02d5SManish Chopra 	int i, rc = -EINVAL;
4699a64b02d5SManish Chopra 
4700a64b02d5SManish Chopra 	if (min_bw < 1 || min_bw > 100) {
4701a64b02d5SManish Chopra 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
4702a64b02d5SManish Chopra 		return rc;
4703a64b02d5SManish Chopra 	}
4704a64b02d5SManish Chopra 
4705a64b02d5SManish Chopra 	for_each_hwfn(cdev, i) {
4706a64b02d5SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4707a64b02d5SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
4708a64b02d5SManish Chopra 		struct qed_mcp_link_state *p_link;
4709a64b02d5SManish Chopra 		struct qed_ptt *p_ptt;
4710a64b02d5SManish Chopra 
4711a64b02d5SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
4712a64b02d5SManish Chopra 
4713a64b02d5SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
4714a64b02d5SManish Chopra 		if (!p_ptt)
4715a64b02d5SManish Chopra 			return -EBUSY;
4716a64b02d5SManish Chopra 
4717a64b02d5SManish Chopra 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
4718a64b02d5SManish Chopra 						      p_link, min_bw);
4719a64b02d5SManish Chopra 		if (rc) {
4720a64b02d5SManish Chopra 			qed_ptt_release(p_hwfn, p_ptt);
4721a64b02d5SManish Chopra 			return rc;
4722a64b02d5SManish Chopra 		}
4723a64b02d5SManish Chopra 
4724a64b02d5SManish Chopra 		if (p_link->min_pf_rate) {
4725a64b02d5SManish Chopra 			u32 min_rate = p_link->min_pf_rate;
4726a64b02d5SManish Chopra 
4727a64b02d5SManish Chopra 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
4728a64b02d5SManish Chopra 								   p_ptt,
4729a64b02d5SManish Chopra 								   min_rate);
4730a64b02d5SManish Chopra 		}
4731a64b02d5SManish Chopra 
4732a64b02d5SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
4733a64b02d5SManish Chopra 	}
4734a64b02d5SManish Chopra 
4735a64b02d5SManish Chopra 	return rc;
4736a64b02d5SManish Chopra }
4737733def6aSYuval Mintz 
4738733def6aSYuval Mintz void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4739733def6aSYuval Mintz {
4740733def6aSYuval Mintz 	struct qed_mcp_link_state *p_link;
4741733def6aSYuval Mintz 
4742733def6aSYuval Mintz 	p_link = &p_hwfn->mcp_info->link_output;
4743733def6aSYuval Mintz 
4744733def6aSYuval Mintz 	if (p_link->min_pf_rate)
4745733def6aSYuval Mintz 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
4746733def6aSYuval Mintz 					       p_link->min_pf_rate);
4747733def6aSYuval Mintz 
4748733def6aSYuval Mintz 	memset(p_hwfn->qm_info.wfq_data, 0,
4749733def6aSYuval Mintz 	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
4750733def6aSYuval Mintz }
47519c79ddaaSMintz, Yuval 
47520ebcebbeSSudarsana Reddy Kalluru int qed_device_num_ports(struct qed_dev *cdev)
47539c79ddaaSMintz, Yuval {
47540ebcebbeSSudarsana Reddy Kalluru 	return cdev->num_ports;
4755db82f70eSsudarsana.kalluru@cavium.com }
4756456a5849SKalderon, Michal 
4757456a5849SKalderon, Michal void qed_set_fw_mac_addr(__le16 *fw_msb,
4758456a5849SKalderon, Michal 			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
4759456a5849SKalderon, Michal {
4760456a5849SKalderon, Michal 	((u8 *)fw_msb)[0] = mac[1];
4761456a5849SKalderon, Michal 	((u8 *)fw_msb)[1] = mac[0];
4762456a5849SKalderon, Michal 	((u8 *)fw_mid)[0] = mac[3];
4763456a5849SKalderon, Michal 	((u8 *)fw_mid)[1] = mac[2];
4764456a5849SKalderon, Michal 	((u8 *)fw_lsb)[0] = mac[5];
4765456a5849SKalderon, Michal 	((u8 *)fw_lsb)[1] = mac[4];
4766456a5849SKalderon, Michal }
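/* For example, the MAC 00:11:22:33:44:55 is packed so that the byte views of
 * fw_msb, fw_mid and fw_lsb read 11:00, 33:22 and 55:44 respectively; each
 * 16-bit word carries two MAC bytes swapped into firmware order.
 */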
4767