11f4d4ed6SAlexander Lobakin // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
3e8f1cb50SMintz, Yuval * Copyright (c) 2015-2017 QLogic Corporation
4663eacd8SAlexander Lobakin * Copyright (c) 2019-2020 Marvell International Ltd.
5fe56b9e6SYuval Mintz */
6fe56b9e6SYuval Mintz
7fe56b9e6SYuval Mintz #include <linux/types.h>
8fe56b9e6SYuval Mintz #include <asm/byteorder.h>
9fe56b9e6SYuval Mintz #include <linux/io.h>
10fe56b9e6SYuval Mintz #include <linux/delay.h>
11fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
12fe56b9e6SYuval Mintz #include <linux/errno.h>
13fe56b9e6SYuval Mintz #include <linux/kernel.h>
14fe56b9e6SYuval Mintz #include <linux/mutex.h>
15fe56b9e6SYuval Mintz #include <linux/pci.h>
16fe56b9e6SYuval Mintz #include <linux/slab.h>
17fe56b9e6SYuval Mintz #include <linux/string.h>
18a91eb52aSYuval Mintz #include <linux/vmalloc.h>
19fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
20fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
21fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
22fe56b9e6SYuval Mintz #include "qed.h"
23fe56b9e6SYuval Mintz #include "qed_cxt.h"
2439651abdSSudarsana Reddy Kalluru #include "qed_dcbx.h"
25fe56b9e6SYuval Mintz #include "qed_dev_api.h"
261e128c81SArun Easi #include "qed_fcoe.h"
27fe56b9e6SYuval Mintz #include "qed_hsi.h"
28ee824f4bSOmkar Kulkarni #include "qed_iro_hsi.h"
29fe56b9e6SYuval Mintz #include "qed_hw.h"
30fe56b9e6SYuval Mintz #include "qed_init_ops.h"
31fe56b9e6SYuval Mintz #include "qed_int.h"
32fc831825SYuval Mintz #include "qed_iscsi.h"
330a7fb11cSYuval Mintz #include "qed_ll2.h"
34fe56b9e6SYuval Mintz #include "qed_mcp.h"
351d6cff4fSYuval Mintz #include "qed_ooo.h"
36fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
37fe56b9e6SYuval Mintz #include "qed_sp.h"
3832a47e72SYuval Mintz #include "qed_sriov.h"
390b55e27dSYuval Mintz #include "qed_vf.h"
40b71b9afdSKalderon, Michal #include "qed_rdma.h"
41897e87a1SShai Malin #include "qed_nvmetcp.h"
42fe56b9e6SYuval Mintz
/* NOTE(review): appears to serialize QM configuration; users of this lock
 * are outside this chunk - confirm before relying on its scope.
 */
static DEFINE_SPINLOCK(qm_lock);
4439651abdSSudarsana Reddy Kalluru
4536907cd5SAriel Elior /******************** Doorbell Recovery *******************/
4636907cd5SAriel Elior /* The doorbell recovery mechanism consists of a list of entries which represent
4736907cd5SAriel Elior * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
4836907cd5SAriel Elior * entity needs to register with the mechanism and provide the parameters
 * describing its doorbell, including a location where last used doorbell data
5036907cd5SAriel Elior * can be found. The doorbell execute function will traverse the list and
5136907cd5SAriel Elior * doorbell all of the registered entries.
5236907cd5SAriel Elior */
/* A single registered doorbelling entity, tracked for replay on recovery */
struct qed_db_recovery_entry {
	struct list_head list_entry;	/* Link in db_recovery_info.list */
	void __iomem *db_addr;		/* Doorbell address to write to */
	void *db_data;			/* Owner-maintained last doorbell data */
	enum qed_db_rec_width db_width;	/* 32b or 64b doorbell write */
	enum qed_db_rec_space db_space;	/* User or kernel space doorbell */
	u8 hwfn_idx;			/* Index of the owning hwfn */
};
6136907cd5SAriel Elior
/* Display a single doorbell recovery entry.
 * @action: free-form verb for the log line (e.g. "Adding", "Deleting").
 */
static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
				     struct qed_db_recovery_entry *db_entry,
				     char *action)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SPQ,
		   "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
		   action,
		   db_entry,
		   db_entry->db_addr,
		   db_entry->db_data,
		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
		   db_entry->hwfn_idx);
}
7836907cd5SAriel Elior
7936907cd5SAriel Elior /* Doorbell address sanity (address within doorbell bar range) */
qed_db_rec_sanity(struct qed_dev * cdev,void __iomem * db_addr,enum qed_db_rec_width db_width,void * db_data)8036907cd5SAriel Elior static bool qed_db_rec_sanity(struct qed_dev *cdev,
81b61b04adSDenis Bolotin void __iomem *db_addr,
82b61b04adSDenis Bolotin enum qed_db_rec_width db_width,
83b61b04adSDenis Bolotin void *db_data)
8436907cd5SAriel Elior {
85b61b04adSDenis Bolotin u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
86b61b04adSDenis Bolotin
8736907cd5SAriel Elior /* Make sure doorbell address is within the doorbell bar */
8836907cd5SAriel Elior if (db_addr < cdev->doorbells ||
89b61b04adSDenis Bolotin (u8 __iomem *)db_addr + width >
9036907cd5SAriel Elior (u8 __iomem *)cdev->doorbells + cdev->db_size) {
9136907cd5SAriel Elior WARN(true,
9236907cd5SAriel Elior "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
9336907cd5SAriel Elior db_addr,
9436907cd5SAriel Elior cdev->doorbells,
9536907cd5SAriel Elior (u8 __iomem *)cdev->doorbells + cdev->db_size);
9636907cd5SAriel Elior return false;
9736907cd5SAriel Elior }
9836907cd5SAriel Elior
9936907cd5SAriel Elior /* ake sure doorbell data pointer is not null */
10036907cd5SAriel Elior if (!db_data) {
10136907cd5SAriel Elior WARN(true, "Illegal doorbell data pointer: %p", db_data);
10236907cd5SAriel Elior return false;
10336907cd5SAriel Elior }
10436907cd5SAriel Elior
10536907cd5SAriel Elior return true;
10636907cd5SAriel Elior }
10736907cd5SAriel Elior
10836907cd5SAriel Elior /* Find hwfn according to the doorbell address */
qed_db_rec_find_hwfn(struct qed_dev * cdev,void __iomem * db_addr)10936907cd5SAriel Elior static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
11036907cd5SAriel Elior void __iomem *db_addr)
11136907cd5SAriel Elior {
11236907cd5SAriel Elior struct qed_hwfn *p_hwfn;
11336907cd5SAriel Elior
11436907cd5SAriel Elior /* In CMT doorbell bar is split down the middle between engine 0 and enigne 1 */
11536907cd5SAriel Elior if (cdev->num_hwfns > 1)
11636907cd5SAriel Elior p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
11736907cd5SAriel Elior &cdev->hwfns[0] : &cdev->hwfns[1];
11836907cd5SAriel Elior else
11936907cd5SAriel Elior p_hwfn = QED_LEADING_HWFN(cdev);
12036907cd5SAriel Elior
12136907cd5SAriel Elior return p_hwfn;
12236907cd5SAriel Elior }
12336907cd5SAriel Elior
/* Add a new entry to the doorbell recovery mechanism.
 * Returns 0 on success, -EINVAL for a bad address/data pointer and
 * -ENOMEM on allocation failure. The caller keeps ownership of db_data;
 * only the tracking entry itself is allocated here.
 */
int qed_db_recovery_add(struct qed_dev *cdev,
			void __iomem *db_addr,
			void *db_data,
			enum qed_db_rec_width db_width,
			enum qed_db_rec_space db_space)
{
	struct qed_db_recovery_entry *db_entry;
	struct qed_hwfn *p_hwfn;

	/* Shortcircuit VFs, for now */
	if (IS_VF(cdev)) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return 0;
	}

	/* Sanitize doorbell address (in-bar) and data pointer (non-NULL) */
	if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
		return -EINVAL;

	/* Obtain hwfn from doorbell address */
	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);

	/* Create entry */
	db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
	if (!db_entry) {
		DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
		return -ENOMEM;
	}

	/* Populate entry */
	db_entry->db_addr = db_addr;
	db_entry->db_data = db_data;
	db_entry->db_width = db_width;
	db_entry->db_space = db_space;
	db_entry->hwfn_idx = p_hwfn->my_id;

	/* Display */
	qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);

	return 0;
}
17236907cd5SAriel Elior
17336907cd5SAriel Elior /* Remove an entry from the doorbell recovery mechanism */
qed_db_recovery_del(struct qed_dev * cdev,void __iomem * db_addr,void * db_data)17436907cd5SAriel Elior int qed_db_recovery_del(struct qed_dev *cdev,
17536907cd5SAriel Elior void __iomem *db_addr, void *db_data)
17636907cd5SAriel Elior {
17736907cd5SAriel Elior struct qed_db_recovery_entry *db_entry = NULL;
17836907cd5SAriel Elior struct qed_hwfn *p_hwfn;
17936907cd5SAriel Elior int rc = -EINVAL;
18036907cd5SAriel Elior
18136907cd5SAriel Elior /* Shortcircuit VFs, for now */
18236907cd5SAriel Elior if (IS_VF(cdev)) {
18336907cd5SAriel Elior DP_VERBOSE(cdev,
18436907cd5SAriel Elior QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
18536907cd5SAriel Elior return 0;
18636907cd5SAriel Elior }
18736907cd5SAriel Elior
18836907cd5SAriel Elior /* Obtain hwfn from doorbell address */
18936907cd5SAriel Elior p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
19036907cd5SAriel Elior
19136907cd5SAriel Elior /* Protect the list */
19236907cd5SAriel Elior spin_lock_bh(&p_hwfn->db_recovery_info.lock);
19336907cd5SAriel Elior list_for_each_entry(db_entry,
19436907cd5SAriel Elior &p_hwfn->db_recovery_info.list, list_entry) {
19536907cd5SAriel Elior /* search according to db_data addr since db_addr is not unique (roce) */
19636907cd5SAriel Elior if (db_entry->db_data == db_data) {
19736907cd5SAriel Elior qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
19836907cd5SAriel Elior list_del(&db_entry->list_entry);
19936907cd5SAriel Elior rc = 0;
20036907cd5SAriel Elior break;
20136907cd5SAriel Elior }
20236907cd5SAriel Elior }
20336907cd5SAriel Elior
20436907cd5SAriel Elior spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
20536907cd5SAriel Elior
20636907cd5SAriel Elior if (rc == -EINVAL)
20736907cd5SAriel Elior
20836907cd5SAriel Elior DP_NOTICE(p_hwfn,
20936907cd5SAriel Elior "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
21036907cd5SAriel Elior db_data, db_addr);
21136907cd5SAriel Elior else
21236907cd5SAriel Elior kfree(db_entry);
21336907cd5SAriel Elior
21436907cd5SAriel Elior return rc;
21536907cd5SAriel Elior }
21636907cd5SAriel Elior
21736907cd5SAriel Elior /* Initialize the doorbell recovery mechanism */
qed_db_recovery_setup(struct qed_hwfn * p_hwfn)21836907cd5SAriel Elior static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
21936907cd5SAriel Elior {
22036907cd5SAriel Elior DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");
22136907cd5SAriel Elior
22236907cd5SAriel Elior /* Make sure db_size was set in cdev */
22336907cd5SAriel Elior if (!p_hwfn->cdev->db_size) {
22436907cd5SAriel Elior DP_ERR(p_hwfn->cdev, "db_size not set\n");
22536907cd5SAriel Elior return -EINVAL;
22636907cd5SAriel Elior }
22736907cd5SAriel Elior
22836907cd5SAriel Elior INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
22936907cd5SAriel Elior spin_lock_init(&p_hwfn->db_recovery_info.lock);
23036907cd5SAriel Elior p_hwfn->db_recovery_info.db_recovery_counter = 0;
23136907cd5SAriel Elior
23236907cd5SAriel Elior return 0;
23336907cd5SAriel Elior }
23436907cd5SAriel Elior
23536907cd5SAriel Elior /* Destroy the doorbell recovery mechanism */
qed_db_recovery_teardown(struct qed_hwfn * p_hwfn)23636907cd5SAriel Elior static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
23736907cd5SAriel Elior {
23836907cd5SAriel Elior struct qed_db_recovery_entry *db_entry = NULL;
23936907cd5SAriel Elior
24036907cd5SAriel Elior DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
24136907cd5SAriel Elior if (!list_empty(&p_hwfn->db_recovery_info.list)) {
24236907cd5SAriel Elior DP_VERBOSE(p_hwfn,
24336907cd5SAriel Elior QED_MSG_SPQ,
24436907cd5SAriel Elior "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
24536907cd5SAriel Elior while (!list_empty(&p_hwfn->db_recovery_info.list)) {
24636907cd5SAriel Elior db_entry =
24736907cd5SAriel Elior list_first_entry(&p_hwfn->db_recovery_info.list,
24836907cd5SAriel Elior struct qed_db_recovery_entry,
24936907cd5SAriel Elior list_entry);
25036907cd5SAriel Elior qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
25136907cd5SAriel Elior list_del(&db_entry->list_entry);
25236907cd5SAriel Elior kfree(db_entry);
25336907cd5SAriel Elior }
25436907cd5SAriel Elior }
25536907cd5SAriel Elior p_hwfn->db_recovery_info.db_recovery_counter = 0;
25636907cd5SAriel Elior }
25736907cd5SAriel Elior
/* Print the content of the doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_NOTICE(p_hwfn,
		  "Displaying doorbell recovery database. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* Protect the list while traversing it */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry) {
		qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
	}

	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}
27636907cd5SAriel Elior
27736907cd5SAriel Elior /* Ring the doorbell of a single doorbell recovery entry */
qed_db_recovery_ring(struct qed_hwfn * p_hwfn,struct qed_db_recovery_entry * db_entry)27836907cd5SAriel Elior static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
2799ac6bb14SDenis Bolotin struct qed_db_recovery_entry *db_entry)
28036907cd5SAriel Elior {
28136907cd5SAriel Elior /* Print according to width */
28236907cd5SAriel Elior if (db_entry->db_width == DB_REC_WIDTH_32B) {
28336907cd5SAriel Elior DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
2849ac6bb14SDenis Bolotin "ringing doorbell address %p data %x\n",
28536907cd5SAriel Elior db_entry->db_addr,
28636907cd5SAriel Elior *(u32 *)db_entry->db_data);
28736907cd5SAriel Elior } else {
28836907cd5SAriel Elior DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
2899ac6bb14SDenis Bolotin "ringing doorbell address %p data %llx\n",
29036907cd5SAriel Elior db_entry->db_addr,
29136907cd5SAriel Elior *(u64 *)(db_entry->db_data));
29236907cd5SAriel Elior }
29336907cd5SAriel Elior
29436907cd5SAriel Elior /* Sanity */
29536907cd5SAriel Elior if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
296b61b04adSDenis Bolotin db_entry->db_width, db_entry->db_data))
29736907cd5SAriel Elior return;
29836907cd5SAriel Elior
29936907cd5SAriel Elior /* Flush the write combined buffer. Since there are multiple doorbelling
30036907cd5SAriel Elior * entities using the same address, if we don't flush, a transaction
30136907cd5SAriel Elior * could be lost.
30236907cd5SAriel Elior */
30336907cd5SAriel Elior wmb();
30436907cd5SAriel Elior
30536907cd5SAriel Elior /* Ring the doorbell */
30636907cd5SAriel Elior if (db_entry->db_width == DB_REC_WIDTH_32B)
30736907cd5SAriel Elior DIRECT_REG_WR(db_entry->db_addr,
30836907cd5SAriel Elior *(u32 *)(db_entry->db_data));
30936907cd5SAriel Elior else
31036907cd5SAriel Elior DIRECT_REG_WR64(db_entry->db_addr,
31136907cd5SAriel Elior *(u64 *)(db_entry->db_data));
31236907cd5SAriel Elior
31336907cd5SAriel Elior /* Flush the write combined buffer. Next doorbell may come from a
31436907cd5SAriel Elior * different entity to the same address...
31536907cd5SAriel Elior */
31636907cd5SAriel Elior wmb();
31736907cd5SAriel Elior }
31836907cd5SAriel Elior
/* Traverse the doorbell recovery entry list and ring all the doorbells */
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* Track amount of times recovery was executed */
	p_hwfn->db_recovery_info.db_recovery_counter++;

	/* Protect the list; every entry is re-doorbelled under the lock */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry)
		qed_db_recovery_ring(p_hwfn, db_entry);
	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}
33736907cd5SAriel Elior
33836907cd5SAriel Elior /******************** Doorbell Recovery end ****************/
33936907cd5SAriel Elior
34079284adeSMichal Kalderon /********************************** NIG LLH ***********************************/
34179284adeSMichal Kalderon
enum qed_llh_filter_type {
	QED_LLH_FILTER_TYPE_MAC,
	QED_LLH_FILTER_TYPE_PROTOCOL,
};

struct qed_llh_mac_filter {
	u8 addr[ETH_ALEN];
};

struct qed_llh_protocol_filter {
	enum qed_llh_prot_filter_type_t type;
	u16 source_port_or_eth_type;
	u16 dest_port;
};

/* A filter is either a MAC filter or a protocol filter */
union qed_llh_filter {
	struct qed_llh_mac_filter mac;
	struct qed_llh_protocol_filter protocol;
};

/* Shadow copy of a single hardware LLH filter entry */
struct qed_llh_filter_info {
	bool b_enabled;			/* Entry holds a live filter */
	u32 ref_cnt;			/* Registrations of this exact filter */
	enum qed_llh_filter_type type;
	union qed_llh_filter filter;
};

struct qed_llh_info {
	/* Number of LLH filters banks */
	u8 num_ppfid;

#define MAX_NUM_PPFID 8
	/* Maps a relative ppfid index to its absolute ppfid */
	u8 ppfid_array[MAX_NUM_PPFID];

	/* Array of filters arrays:
	 * "num_ppfid" elements of filters banks, where each is an array of
	 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
	 */
	struct qed_llh_filter_info **pp_filters;
};
38279284adeSMichal Kalderon
qed_llh_free(struct qed_dev * cdev)38379284adeSMichal Kalderon static void qed_llh_free(struct qed_dev *cdev)
38479284adeSMichal Kalderon {
38579284adeSMichal Kalderon struct qed_llh_info *p_llh_info = cdev->p_llh_info;
38679284adeSMichal Kalderon u32 i;
38779284adeSMichal Kalderon
38879284adeSMichal Kalderon if (p_llh_info) {
38979284adeSMichal Kalderon if (p_llh_info->pp_filters)
39079284adeSMichal Kalderon for (i = 0; i < p_llh_info->num_ppfid; i++)
39179284adeSMichal Kalderon kfree(p_llh_info->pp_filters[i]);
39279284adeSMichal Kalderon
39379284adeSMichal Kalderon kfree(p_llh_info->pp_filters);
39479284adeSMichal Kalderon }
39579284adeSMichal Kalderon
39679284adeSMichal Kalderon kfree(p_llh_info);
39779284adeSMichal Kalderon cdev->p_llh_info = NULL;
39879284adeSMichal Kalderon }
39979284adeSMichal Kalderon
/* Allocate the LLH info structure and its per-ppfid filter banks.
 * On -ENOMEM, partially-done allocations remain in cdev->p_llh_info;
 * presumably the caller releases them via qed_llh_free() - verify.
 */
static int qed_llh_alloc(struct qed_dev *cdev)
{
	struct qed_llh_info *p_llh_info;
	u32 size, i;

	p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
	if (!p_llh_info)
		return -ENOMEM;
	cdev->p_llh_info = p_llh_info;

	/* Collect the ppfids whose bits are set in cdev->ppfid_bitmap */
	for (i = 0; i < MAX_NUM_PPFID; i++) {
		if (!(cdev->ppfid_bitmap & (0x1 << i)))
			continue;

		p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
		DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n",
			   p_llh_info->num_ppfid, i);
		p_llh_info->num_ppfid++;
	}

	/* Array of bank pointers, one per discovered ppfid */
	size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
	p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
	if (!p_llh_info->pp_filters)
		return -ENOMEM;

	/* Each bank holds NIG_REG_LLH_FUNC_FILTER_EN_SIZE filter entries */
	size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
	       sizeof(**p_llh_info->pp_filters);
	for (i = 0; i < p_llh_info->num_ppfid; i++) {
		p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
		if (!p_llh_info->pp_filters[i])
			return -ENOMEM;
	}

	return 0;
}
43579284adeSMichal Kalderon
/* Validate a (ppfid, filter_idx) pair against the shadow's bounds.
 * @action: caller's operation name, used only in the failure log.
 */
static int qed_llh_shadow_sanity(struct qed_dev *cdev,
				 u8 ppfid, u8 filter_idx, const char *action)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	int rc = 0;

	if (ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(cdev,
			  "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
			  action, ppfid, p_llh_info->num_ppfid);
		rc = -EINVAL;
	} else if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(cdev,
			  "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
			  action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
		rc = -EINVAL;
	}

	return rc;
}
45779284adeSMichal Kalderon
45879284adeSMichal Kalderon #define QED_LLH_INVALID_FILTER_IDX 0xff
45979284adeSMichal Kalderon
46079284adeSMichal Kalderon static int
qed_llh_shadow_search_filter(struct qed_dev * cdev,u8 ppfid,union qed_llh_filter * p_filter,u8 * p_filter_idx)46179284adeSMichal Kalderon qed_llh_shadow_search_filter(struct qed_dev *cdev,
46279284adeSMichal Kalderon u8 ppfid,
46379284adeSMichal Kalderon union qed_llh_filter *p_filter, u8 *p_filter_idx)
46479284adeSMichal Kalderon {
46579284adeSMichal Kalderon struct qed_llh_info *p_llh_info = cdev->p_llh_info;
46679284adeSMichal Kalderon struct qed_llh_filter_info *p_filters;
46779284adeSMichal Kalderon int rc;
46879284adeSMichal Kalderon u8 i;
46979284adeSMichal Kalderon
47079284adeSMichal Kalderon rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
47179284adeSMichal Kalderon if (rc)
47279284adeSMichal Kalderon return rc;
47379284adeSMichal Kalderon
47479284adeSMichal Kalderon *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
47579284adeSMichal Kalderon
47679284adeSMichal Kalderon p_filters = p_llh_info->pp_filters[ppfid];
47779284adeSMichal Kalderon for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
47879284adeSMichal Kalderon if (!memcmp(p_filter, &p_filters[i].filter,
47979284adeSMichal Kalderon sizeof(*p_filter))) {
48079284adeSMichal Kalderon *p_filter_idx = i;
48179284adeSMichal Kalderon break;
48279284adeSMichal Kalderon }
48379284adeSMichal Kalderon }
48479284adeSMichal Kalderon
48579284adeSMichal Kalderon return 0;
48679284adeSMichal Kalderon }
48779284adeSMichal Kalderon
48879284adeSMichal Kalderon static int
qed_llh_shadow_get_free_idx(struct qed_dev * cdev,u8 ppfid,u8 * p_filter_idx)48979284adeSMichal Kalderon qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
49079284adeSMichal Kalderon {
49179284adeSMichal Kalderon struct qed_llh_info *p_llh_info = cdev->p_llh_info;
49279284adeSMichal Kalderon struct qed_llh_filter_info *p_filters;
49379284adeSMichal Kalderon int rc;
49479284adeSMichal Kalderon u8 i;
49579284adeSMichal Kalderon
49679284adeSMichal Kalderon rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
49779284adeSMichal Kalderon if (rc)
49879284adeSMichal Kalderon return rc;
49979284adeSMichal Kalderon
50079284adeSMichal Kalderon *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
50179284adeSMichal Kalderon
50279284adeSMichal Kalderon p_filters = p_llh_info->pp_filters[ppfid];
50379284adeSMichal Kalderon for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
50479284adeSMichal Kalderon if (!p_filters[i].b_enabled) {
50579284adeSMichal Kalderon *p_filter_idx = i;
50679284adeSMichal Kalderon break;
50779284adeSMichal Kalderon }
50879284adeSMichal Kalderon }
50979284adeSMichal Kalderon
51079284adeSMichal Kalderon return 0;
51179284adeSMichal Kalderon }
51279284adeSMichal Kalderon
/* Bind a filter to a specific shadow slot, or take another reference if
 * the slot is already in use. The updated reference count is returned
 * through p_ref_cnt.
 */
static int
__qed_llh_shadow_add_filter(struct qed_dev *cdev,
			    u8 ppfid,
			    u8 filter_idx,
			    enum qed_llh_filter_type type,
			    union qed_llh_filter *p_filter, u32 *p_ref_cnt)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
	if (rc)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	/* First reference initializes the slot; later ones only bump ref_cnt */
	if (!p_filters[filter_idx].ref_cnt) {
		p_filters[filter_idx].b_enabled = true;
		p_filters[filter_idx].type = type;
		memcpy(&p_filters[filter_idx].filter, p_filter,
		       sizeof(p_filters[filter_idx].filter));
	}

	*p_ref_cnt = ++p_filters[filter_idx].ref_cnt;

	return 0;
}
54079284adeSMichal Kalderon
54179284adeSMichal Kalderon static int
qed_llh_shadow_add_filter(struct qed_dev * cdev,u8 ppfid,enum qed_llh_filter_type type,union qed_llh_filter * p_filter,u8 * p_filter_idx,u32 * p_ref_cnt)54279284adeSMichal Kalderon qed_llh_shadow_add_filter(struct qed_dev *cdev,
54379284adeSMichal Kalderon u8 ppfid,
54479284adeSMichal Kalderon enum qed_llh_filter_type type,
54579284adeSMichal Kalderon union qed_llh_filter *p_filter,
54679284adeSMichal Kalderon u8 *p_filter_idx, u32 *p_ref_cnt)
54779284adeSMichal Kalderon {
54879284adeSMichal Kalderon int rc;
54979284adeSMichal Kalderon
55079284adeSMichal Kalderon /* Check if the same filter already exist */
55179284adeSMichal Kalderon rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
55279284adeSMichal Kalderon if (rc)
55379284adeSMichal Kalderon return rc;
55479284adeSMichal Kalderon
55579284adeSMichal Kalderon /* Find a new entry in case of a new filter */
55679284adeSMichal Kalderon if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
55779284adeSMichal Kalderon rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
55879284adeSMichal Kalderon if (rc)
55979284adeSMichal Kalderon return rc;
56079284adeSMichal Kalderon }
56179284adeSMichal Kalderon
56279284adeSMichal Kalderon /* No free entry was found */
56379284adeSMichal Kalderon if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
56479284adeSMichal Kalderon DP_NOTICE(cdev,
56579284adeSMichal Kalderon "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
56679284adeSMichal Kalderon ppfid);
56779284adeSMichal Kalderon return -EINVAL;
56879284adeSMichal Kalderon }
56979284adeSMichal Kalderon
57079284adeSMichal Kalderon return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
57179284adeSMichal Kalderon p_filter, p_ref_cnt);
57279284adeSMichal Kalderon }
57379284adeSMichal Kalderon
57479284adeSMichal Kalderon static int
__qed_llh_shadow_remove_filter(struct qed_dev * cdev,u8 ppfid,u8 filter_idx,u32 * p_ref_cnt)57579284adeSMichal Kalderon __qed_llh_shadow_remove_filter(struct qed_dev *cdev,
57679284adeSMichal Kalderon u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
57779284adeSMichal Kalderon {
57879284adeSMichal Kalderon struct qed_llh_info *p_llh_info = cdev->p_llh_info;
57979284adeSMichal Kalderon struct qed_llh_filter_info *p_filters;
58079284adeSMichal Kalderon int rc;
58179284adeSMichal Kalderon
58279284adeSMichal Kalderon rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
58379284adeSMichal Kalderon if (rc)
58479284adeSMichal Kalderon return rc;
58579284adeSMichal Kalderon
58679284adeSMichal Kalderon p_filters = p_llh_info->pp_filters[ppfid];
58779284adeSMichal Kalderon if (!p_filters[filter_idx].ref_cnt) {
58879284adeSMichal Kalderon DP_NOTICE(cdev,
58979284adeSMichal Kalderon "LLH shadow: trying to remove a filter with ref_cnt=0\n");
59079284adeSMichal Kalderon return -EINVAL;
59179284adeSMichal Kalderon }
59279284adeSMichal Kalderon
59379284adeSMichal Kalderon *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
59479284adeSMichal Kalderon if (!p_filters[filter_idx].ref_cnt)
59579284adeSMichal Kalderon memset(&p_filters[filter_idx],
59679284adeSMichal Kalderon 0, sizeof(p_filters[filter_idx]));
59779284adeSMichal Kalderon
59879284adeSMichal Kalderon return 0;
59979284adeSMichal Kalderon }
60079284adeSMichal Kalderon
/* Drop one reference to the given filter in the ppfid's shadow bank.
 * Fails with -EINVAL when no matching filter exists. The slot index and
 * remaining reference count are returned via p_filter_idx / p_ref_cnt.
 */
static int
qed_llh_shadow_remove_filter(struct qed_dev *cdev,
			     u8 ppfid,
			     union qed_llh_filter *p_filter,
			     u8 *p_filter_idx, u32 *p_ref_cnt)
{
	int rc;

	rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
	if (rc)
		return rc;

	/* No matching filter was found */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
		return -EINVAL;
	}

	return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
					      p_ref_cnt);
}
62279284adeSMichal Kalderon
/* Translate a relative ppfid into its absolute hardware index through
 * the ppfid_array mapping.  On an out-of-range @ppfid, *p_abs_ppfid is
 * set to 0 and -EINVAL is returned.
 */
static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
{
	u8 num_ppfid = cdev->p_llh_info->num_ppfid;

	if (ppfid < num_ppfid) {
		*p_abs_ppfid = cdev->p_llh_info->ppfid_array[ppfid];
		return 0;
	}

	DP_NOTICE(cdev,
		  "ppfid %d is not valid, available indices are 0..%d\n",
		  ppfid, num_ppfid - 1);
	*p_abs_ppfid = 0;

	return -EINVAL;
}
63979284adeSMichal Kalderon
/* Program the LLH engine affinity for a 100G (CMT) device.
 *
 * Queries the MFW for the engine configuration (tolerating firmwares
 * that do not support the command), binds RoCE traffic to the
 * FIR-affinitized engine, and then sets the non-RoCE affinity of every
 * ppfid: storage personalities are bound to a single engine while an L2
 * personality uses both.
 */
static int
qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_eng eng;
	u8 ppfid;
	int rc;

	rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
	/* -EOPNOTSUPP means the MFW does not provide an engine config;
	 * continue with the default affinity values in that case.
	 */
	if (rc != 0 && rc != -EOPNOTSUPP) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the engine affinity configuration\n");
		return rc;
	}

	/* RoCE PF is bound to a single engine */
	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
		eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
		rc = qed_llh_set_roce_affinity(cdev, eng);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to set the RoCE engine affinity\n");
			return rc;
		}

		DP_VERBOSE(cdev,
			   QED_MSG_SP,
			   "LLH: Set the engine affinity of RoCE packets as %d\n",
			   eng);
	}

	/* Storage PF is bound to a single engine while L2 PF uses both */
	if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
	    QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
	else /* L2_PERSONALITY */
		eng = QED_BOTH_ENG;

	/* Apply the chosen non-RoCE affinity to every ppfid of the device */
	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to set the engine affinity of ppfid %d\n",
				  ppfid);
			return rc;
		}
	}

	DP_VERBOSE(cdev, QED_MSG_SP,
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);

	return 0;
}
69479284adeSMichal Kalderon
/* Per-PF LLH hardware init: map each ppfid owned by this PF to its
 * relative PF id, add an LLH filter with the primary MAC when MAC
 * classification is in use (FCoE personality skips this), and program
 * the engine affinity on 100G (CMT) devices.
 */
static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 ppfid, abs_ppfid;
	int rc;

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		u32 addr;

		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			return rc;

		/* Each PPFID2PFID table entry is one 32-bit register */
		addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
	}

	if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
	    !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
		rc = qed_llh_add_mac_filter(cdev, 0,
					    p_hwfn->hw_info.hw_mac_addr);
		/* Best effort - the failure is logged but init continues */
		if (rc)
			DP_NOTICE(cdev,
				  "Failed to add an LLH filter with the primary MAC\n");
	}

	if (QED_IS_CMT(cdev)) {
		rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	return 0;
}
73079284adeSMichal Kalderon
/* Return the number of ppfids in the device's LLH configuration */
u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
{
	return cdev->p_llh_info->num_ppfid;
}
73579284adeSMichal Kalderon
73679284adeSMichal Kalderon #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
73779284adeSMichal Kalderon #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
73879284adeSMichal Kalderon #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
73979284adeSMichal Kalderon #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
74079284adeSMichal Kalderon
/* Program the engine to which non-RoCE traffic of @ppfid is steered.
 *
 * Only meaningful on 100G (CMT) devices; on others the function is a
 * no-op that returns success.  When @ppfid is 0 and the PF has the
 * iWARP personality, the cached iWARP affinity is derived from @eng too.
 *
 * Return: 0 on success, -EAGAIN if no PTT could be acquired, -EINVAL on
 * a bad @ppfid or @eng.
 */
int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	u8 abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!QED_IS_CMT(cdev))
		goto out;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto out;

	/* Map the requested affinity to the hardware engine-select encoding */
	switch (eng) {
	case QED_ENG0:
		eng_sel = 0;
		break;
	case QED_ENG1:
		eng_sel = 1;
		break;
	case QED_BOTH_ENG:
		eng_sel = 2;
		break;
	default:
		DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
		rc = -EINVAL;
		goto out;
	}

	/* Read-modify-write only the non-RoCE field of the per-ppfid
	 * engine-selection register.
	 */
	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
	val = qed_rd(p_hwfn, p_ptt, addr);
	SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
	qed_wr(p_hwfn, p_ptt, addr, val);

	/* The iWARP affinity is set as the affinity of ppfid 0 */
	if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
		cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
78879284adeSMichal Kalderon
/* Program the engine to which RoCE traffic is steered on a 100G (CMT)
 * device.  The RoCE field of every ppfid's engine-selection register is
 * updated; for QED_BOTH_ENG, QP-based engine classification is enabled
 * as well.  A no-op that returns success on non-CMT devices.
 *
 * Return: 0 on success, -EAGAIN if no PTT could be acquired, -EINVAL on
 * a bad @eng.
 */
int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	u8 ppfid, abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!QED_IS_CMT(cdev))
		goto out;

	/* Map the requested affinity to the hardware engine-select encoding */
	switch (eng) {
	case QED_ENG0:
		eng_sel = 0;
		break;
	case QED_ENG1:
		eng_sel = 1;
		break;
	case QED_BOTH_ENG:
		eng_sel = 2;
		qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
		       0xf);	/* QP bit 15 */
		break;
	default:
		DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
		rc = -EINVAL;
		goto out;
	}

	/* Read-modify-write the RoCE field of every ppfid's register */
	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			goto out;

		addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
		qed_wr(p_hwfn, p_ptt, addr, val);
	}
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
83679284adeSMichal Kalderon
/* Image of a single LLH filter row as programmed to the NIG registers */
struct qed_llh_filter_details {
	u64 value;		/* Filter value (MAC or ports/ethertype) */
	u32 mode;		/* 0: MAC-address classification, 1: protocol */
	u32 protocol_type;	/* Bitmap of protocol filter types (mode 1) */
	u32 hdr_sel;		/* Header select; callers here always use 0 */
	u32 enable;		/* Non-zero: the filter row is active */
};
84479284adeSMichal Kalderon
/* Write the filter-row image in @p_details to the NIG/LLH registers of
 * row @filter_idx of @abs_ppfid.
 *
 * Rows of other ppfids are reached by pretending to the owning PF (and,
 * for the 64-bit value register, via DMAE with the destination PF set).
 * The enable register is written first when disabling a row and last
 * when enabling one, so a partially written row is never active.
 */
static int
qed_llh_access_filter(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 abs_ppfid,
		      u8 filter_idx,
		      struct qed_llh_filter_details *p_details)
{
	struct qed_dmae_params params = {0};
	u32 addr;
	u8 pfid;
	int rc;

	/* The NIG/LLH registers that are accessed in this function have only 16
	 * rows which are exposed to a PF. I.e. only the 16 filters of its
	 * default ppfid. Accessing filters of other ppfids requires pretending
	 * to another PFs.
	 * The calculation of PPFID->PFID in AH is based on the relative index
	 * of a PF on its port.
	 * For BB the pfid is actually the abs_ppfid.
	 */
	if (QED_IS_BB(p_hwfn->cdev))
		pfid = abs_ppfid;
	else
		pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
		       MFW_PORT(p_hwfn);

	/* Filter enable - should be done first when removing a filter */
	if (!p_details->enable) {
		qed_fid_pretend(p_hwfn, p_ptt,
				pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

		addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_details->enable);

		/* Restore this PF's own FID before the DMAE below */
		qed_fid_pretend(p_hwfn, p_ptt,
				p_hwfn->rel_pf_id <<
				PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
	}

	/* Filter value - 64 bits (2 dwords), written via DMAE with the
	 * destination PF set to the pretended PF
	 */
	addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;

	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
	params.dst_pfid = pfid;
	rc = qed_dmae_host2grc(p_hwfn,
			       p_ptt,
			       (u64)(uintptr_t)&p_details->value,
			       addr, 2 /* size_in_dwords */,
			       &params);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt,
			pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	/* Filter mode */
	addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->mode);

	/* Filter protocol type */
	addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);

	/* Filter header select */
	addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);

	/* Filter enable - should be done last when adding a filter */
	if (p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
	}

	/* Stop pretending - restore this PF's own FID */
	qed_fid_pretend(p_hwfn, p_ptt,
			p_hwfn->rel_pf_id <<
			PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	return 0;
}
92479284adeSMichal Kalderon
92579284adeSMichal Kalderon static int
qed_llh_add_filter(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u8 abs_ppfid,u8 filter_idx,u8 filter_prot_type,u32 high,u32 low)92679284adeSMichal Kalderon qed_llh_add_filter(struct qed_hwfn *p_hwfn,
92779284adeSMichal Kalderon struct qed_ptt *p_ptt,
92879284adeSMichal Kalderon u8 abs_ppfid,
92979284adeSMichal Kalderon u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
93079284adeSMichal Kalderon {
93179284adeSMichal Kalderon struct qed_llh_filter_details filter_details;
93279284adeSMichal Kalderon
93379284adeSMichal Kalderon filter_details.enable = 1;
93479284adeSMichal Kalderon filter_details.value = ((u64)high << 32) | low;
93579284adeSMichal Kalderon filter_details.hdr_sel = 0;
93679284adeSMichal Kalderon filter_details.protocol_type = filter_prot_type;
93779284adeSMichal Kalderon /* Mode: 0: MAC-address classification 1: protocol classification */
93879284adeSMichal Kalderon filter_details.mode = filter_prot_type ? 1 : 0;
93979284adeSMichal Kalderon
94079284adeSMichal Kalderon return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
94179284adeSMichal Kalderon &filter_details);
94279284adeSMichal Kalderon }
94379284adeSMichal Kalderon
94479284adeSMichal Kalderon static int
qed_llh_remove_filter(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u8 abs_ppfid,u8 filter_idx)94579284adeSMichal Kalderon qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
94679284adeSMichal Kalderon struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
94779284adeSMichal Kalderon {
94879284adeSMichal Kalderon struct qed_llh_filter_details filter_details = {0};
94979284adeSMichal Kalderon
95079284adeSMichal Kalderon return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
95179284adeSMichal Kalderon &filter_details);
95279284adeSMichal Kalderon }
95379284adeSMichal Kalderon
/* Add a MAC classification filter to @ppfid.
 *
 * The filter is reference counted in the LLH shadow; the hardware is
 * only programmed for the first reference.  A no-op (success) when MAC
 * classification is not used in this MF mode.
 *
 * Return: 0 on success, -EAGAIN if no PTT could be acquired, negative
 * errno otherwise.
 */
int qed_llh_add_mac_filter(struct qed_dev *cdev,
			   u8 ppfid, const u8 mac_addr[ETH_ALEN])
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	union qed_llh_filter filter = {};
	u8 filter_idx, abs_ppfid = 0;
	u32 high, low, ref_cnt;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = qed_llh_shadow_add_filter(cdev, ppfid,
				       QED_LLH_FILTER_TYPE_MAC,
				       &filter, &filter_idx, &ref_cnt);
	if (rc)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			goto err;

		/* Pack the MAC into the 48-bit filter value: bytes 0-1
		 * into 'high', bytes 2-5 into 'low'
		 */
		high = mac_addr[1] | (mac_addr[0] << 8);
		low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
		      (mac_addr[2] << 24);
		rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					0, high, low);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
		  mac_addr, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
100779284adeSMichal Kalderon
100879284adeSMichal Kalderon static int
qed_llh_protocol_filter_stringify(struct qed_dev * cdev,enum qed_llh_prot_filter_type_t type,u16 source_port_or_eth_type,u16 dest_port,u8 * str,size_t str_len)100979284adeSMichal Kalderon qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
101079284adeSMichal Kalderon enum qed_llh_prot_filter_type_t type,
101179284adeSMichal Kalderon u16 source_port_or_eth_type,
101279284adeSMichal Kalderon u16 dest_port, u8 *str, size_t str_len)
101379284adeSMichal Kalderon {
101479284adeSMichal Kalderon switch (type) {
101579284adeSMichal Kalderon case QED_LLH_FILTER_ETHERTYPE:
101679284adeSMichal Kalderon snprintf(str, str_len, "Ethertype 0x%04x",
101779284adeSMichal Kalderon source_port_or_eth_type);
101879284adeSMichal Kalderon break;
101979284adeSMichal Kalderon case QED_LLH_FILTER_TCP_SRC_PORT:
102079284adeSMichal Kalderon snprintf(str, str_len, "TCP src port 0x%04x",
102179284adeSMichal Kalderon source_port_or_eth_type);
102279284adeSMichal Kalderon break;
102379284adeSMichal Kalderon case QED_LLH_FILTER_UDP_SRC_PORT:
102479284adeSMichal Kalderon snprintf(str, str_len, "UDP src port 0x%04x",
102579284adeSMichal Kalderon source_port_or_eth_type);
102679284adeSMichal Kalderon break;
102779284adeSMichal Kalderon case QED_LLH_FILTER_TCP_DEST_PORT:
102879284adeSMichal Kalderon snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
102979284adeSMichal Kalderon break;
103079284adeSMichal Kalderon case QED_LLH_FILTER_UDP_DEST_PORT:
103179284adeSMichal Kalderon snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
103279284adeSMichal Kalderon break;
103379284adeSMichal Kalderon case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
103479284adeSMichal Kalderon snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
103579284adeSMichal Kalderon source_port_or_eth_type, dest_port);
103679284adeSMichal Kalderon break;
103779284adeSMichal Kalderon case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
103879284adeSMichal Kalderon snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
103979284adeSMichal Kalderon source_port_or_eth_type, dest_port);
104079284adeSMichal Kalderon break;
104179284adeSMichal Kalderon default:
104279284adeSMichal Kalderon DP_NOTICE(cdev,
104379284adeSMichal Kalderon "Non valid LLH protocol filter type %d\n", type);
104479284adeSMichal Kalderon return -EINVAL;
104579284adeSMichal Kalderon }
104679284adeSMichal Kalderon
104779284adeSMichal Kalderon return 0;
104879284adeSMichal Kalderon }
104979284adeSMichal Kalderon
105079284adeSMichal Kalderon static int
qed_llh_protocol_filter_to_hilo(struct qed_dev * cdev,enum qed_llh_prot_filter_type_t type,u16 source_port_or_eth_type,u16 dest_port,u32 * p_high,u32 * p_low)105179284adeSMichal Kalderon qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
105279284adeSMichal Kalderon enum qed_llh_prot_filter_type_t type,
105379284adeSMichal Kalderon u16 source_port_or_eth_type,
105479284adeSMichal Kalderon u16 dest_port, u32 *p_high, u32 *p_low)
105579284adeSMichal Kalderon {
105679284adeSMichal Kalderon *p_high = 0;
105779284adeSMichal Kalderon *p_low = 0;
105879284adeSMichal Kalderon
105979284adeSMichal Kalderon switch (type) {
106079284adeSMichal Kalderon case QED_LLH_FILTER_ETHERTYPE:
106179284adeSMichal Kalderon *p_high = source_port_or_eth_type;
106279284adeSMichal Kalderon break;
106379284adeSMichal Kalderon case QED_LLH_FILTER_TCP_SRC_PORT:
106479284adeSMichal Kalderon case QED_LLH_FILTER_UDP_SRC_PORT:
106579284adeSMichal Kalderon *p_low = source_port_or_eth_type << 16;
106679284adeSMichal Kalderon break;
106779284adeSMichal Kalderon case QED_LLH_FILTER_TCP_DEST_PORT:
106879284adeSMichal Kalderon case QED_LLH_FILTER_UDP_DEST_PORT:
106979284adeSMichal Kalderon *p_low = dest_port;
107079284adeSMichal Kalderon break;
107179284adeSMichal Kalderon case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
107279284adeSMichal Kalderon case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
107379284adeSMichal Kalderon *p_low = (source_port_or_eth_type << 16) | dest_port;
107479284adeSMichal Kalderon break;
107579284adeSMichal Kalderon default:
107679284adeSMichal Kalderon DP_NOTICE(cdev,
107779284adeSMichal Kalderon "Non valid LLH protocol filter type %d\n", type);
107879284adeSMichal Kalderon return -EINVAL;
107979284adeSMichal Kalderon }
108079284adeSMichal Kalderon
108179284adeSMichal Kalderon return 0;
108279284adeSMichal Kalderon }
108379284adeSMichal Kalderon
108479284adeSMichal Kalderon int
qed_llh_add_protocol_filter(struct qed_dev * cdev,u8 ppfid,enum qed_llh_prot_filter_type_t type,u16 source_port_or_eth_type,u16 dest_port)108579284adeSMichal Kalderon qed_llh_add_protocol_filter(struct qed_dev *cdev,
108679284adeSMichal Kalderon u8 ppfid,
108779284adeSMichal Kalderon enum qed_llh_prot_filter_type_t type,
108879284adeSMichal Kalderon u16 source_port_or_eth_type, u16 dest_port)
108979284adeSMichal Kalderon {
109079284adeSMichal Kalderon struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
109179284adeSMichal Kalderon struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
109279284adeSMichal Kalderon u8 filter_idx, abs_ppfid, str[32], type_bitmap;
109379284adeSMichal Kalderon union qed_llh_filter filter = {};
109479284adeSMichal Kalderon u32 high, low, ref_cnt;
109579284adeSMichal Kalderon int rc = 0;
109679284adeSMichal Kalderon
109779284adeSMichal Kalderon if (!p_ptt)
109879284adeSMichal Kalderon return -EAGAIN;
109979284adeSMichal Kalderon
110079284adeSMichal Kalderon if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
110179284adeSMichal Kalderon goto out;
110279284adeSMichal Kalderon
110379284adeSMichal Kalderon rc = qed_llh_protocol_filter_stringify(cdev, type,
110479284adeSMichal Kalderon source_port_or_eth_type,
110579284adeSMichal Kalderon dest_port, str, sizeof(str));
110679284adeSMichal Kalderon if (rc)
110779284adeSMichal Kalderon goto err;
110879284adeSMichal Kalderon
110979284adeSMichal Kalderon filter.protocol.type = type;
111079284adeSMichal Kalderon filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
111179284adeSMichal Kalderon filter.protocol.dest_port = dest_port;
111279284adeSMichal Kalderon rc = qed_llh_shadow_add_filter(cdev,
111379284adeSMichal Kalderon ppfid,
111479284adeSMichal Kalderon QED_LLH_FILTER_TYPE_PROTOCOL,
111579284adeSMichal Kalderon &filter, &filter_idx, &ref_cnt);
111679284adeSMichal Kalderon if (rc)
111779284adeSMichal Kalderon goto err;
111879284adeSMichal Kalderon
111979284adeSMichal Kalderon rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
112079284adeSMichal Kalderon if (rc)
112179284adeSMichal Kalderon goto err;
112279284adeSMichal Kalderon
11238e2ea3eaSMichal Kalderon /* Configure the LLH only in case of a new the filter */
11248e2ea3eaSMichal Kalderon if (ref_cnt == 1) {
112579284adeSMichal Kalderon rc = qed_llh_protocol_filter_to_hilo(cdev, type,
112679284adeSMichal Kalderon source_port_or_eth_type,
112779284adeSMichal Kalderon dest_port, &high, &low);
112879284adeSMichal Kalderon if (rc)
112979284adeSMichal Kalderon goto err;
113079284adeSMichal Kalderon
113179284adeSMichal Kalderon type_bitmap = 0x1 << type;
113279284adeSMichal Kalderon rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
113379284adeSMichal Kalderon filter_idx, type_bitmap, high, low);
113479284adeSMichal Kalderon if (rc)
113579284adeSMichal Kalderon goto err;
113679284adeSMichal Kalderon }
113779284adeSMichal Kalderon
113879284adeSMichal Kalderon DP_VERBOSE(cdev,
113979284adeSMichal Kalderon QED_MSG_SP,
114079284adeSMichal Kalderon "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
114179284adeSMichal Kalderon str, ppfid, abs_ppfid, filter_idx, ref_cnt);
114279284adeSMichal Kalderon
114379284adeSMichal Kalderon goto out;
114479284adeSMichal Kalderon
114579284adeSMichal Kalderon err: DP_NOTICE(p_hwfn,
114679284adeSMichal Kalderon "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
114779284adeSMichal Kalderon str, ppfid);
114879284adeSMichal Kalderon out:
114979284adeSMichal Kalderon qed_ptt_release(p_hwfn, p_ptt);
115079284adeSMichal Kalderon
115179284adeSMichal Kalderon return rc;
115279284adeSMichal Kalderon }
115379284adeSMichal Kalderon
/* Remove a MAC classification filter from @ppfid.
 *
 * Drops one reference in the LLH shadow; the hardware row is only
 * cleared when the last reference is gone.  Skipped when MAC
 * classification is not used in this MF mode or for the NVMeTCP
 * personality.
 */
void qed_llh_remove_mac_filter(struct qed_dev *cdev,
			       u8 ppfid, u8 mac_addr[ETH_ALEN])
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	union qed_llh_filter filter = {};
	u8 filter_idx, abs_ppfid;
	int rc = 0;
	u32 ref_cnt;

	if (!p_ptt)
		return;

	/* Skip via 'out' (not a bare return) so the acquired PTT is
	 * always released; the NVMeTCP early return used to leak it.
	 */
	if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) ||
	    QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		goto out;

	ether_addr_copy(filter.mac.addr, mac_addr);
	rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
					  &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					   filter_idx);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
		  mac_addr, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);
}
120479284adeSMichal Kalderon
/* Remove a protocol-based LLH classification filter from the given ppfid.
 *
 * The filter is first removed from the SW shadow copy; the HW filter slot
 * is cleared only once the shadow reference count drops to zero. The
 * acquired PTT window is released on every exit path except early
 * acquisition failure.
 */
void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
				    u8 ppfid,
				    enum qed_llh_prot_filter_type_t type,
				    u16 source_port_or_eth_type, u16 dest_port)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	/* Zero-initialize str: the err path prints it, and
	 * qed_llh_protocol_filter_stringify() may fail before filling it,
	 * which would otherwise print an uninitialized buffer.
	 */
	u8 filter_idx, abs_ppfid, str[32] = "";
	union qed_llh_filter filter = {};
	int rc = 0;
	u32 ref_cnt;

	if (!p_ptt)
		return;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
		goto out;

	rc = qed_llh_protocol_filter_stringify(cdev, type,
					       source_port_or_eth_type,
					       dest_port, str, sizeof(str));
	if (rc)
		goto err;

	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
					  &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					   filter_idx);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
		  str, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);
}
126279284adeSMichal Kalderon
126379284adeSMichal Kalderon /******************************* NIG LLH - End ********************************/
126479284adeSMichal Kalderon
/* Minimum number of doorbell pages (DPIs) a PF must be able to map, and
 * the corresponding minimal PWM doorbell region size (one WID per DPI).
 */
#define QED_MIN_DPIS            (4)
#define QED_MIN_PWM_REGION      (QED_WID_SIZE * QED_MIN_DPIS)
126751ff1725SRam Amrani
qed_hw_bar_size(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,enum BAR_ID bar_id)126815582962SRahul Verma static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
126915582962SRahul Verma struct qed_ptt *p_ptt, enum BAR_ID bar_id)
1270c2035eeaSRam Amrani {
1271c2035eeaSRam Amrani u32 bar_reg = (bar_id == BAR_ID_0 ?
1272c2035eeaSRam Amrani PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
12731408cc1fSYuval Mintz u32 val;
1274c2035eeaSRam Amrani
12751408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev))
12761a850bfcSMintz, Yuval return qed_vf_hw_bar_size(p_hwfn, bar_id);
12771408cc1fSYuval Mintz
127815582962SRahul Verma val = qed_rd(p_hwfn, p_ptt, bar_reg);
1279c2035eeaSRam Amrani if (val)
1280c2035eeaSRam Amrani return 1 << (val + 15);
1281c2035eeaSRam Amrani
1282c2035eeaSRam Amrani /* Old MFW initialized above registered only conditionally */
1283c2035eeaSRam Amrani if (p_hwfn->cdev->num_hwfns > 1) {
1284c2035eeaSRam Amrani DP_INFO(p_hwfn,
1285c2035eeaSRam Amrani "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
1286c2035eeaSRam Amrani return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
1287c2035eeaSRam Amrani } else {
1288c2035eeaSRam Amrani DP_INFO(p_hwfn,
1289c2035eeaSRam Amrani "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
1290c2035eeaSRam Amrani return 512 * 1024;
1291c2035eeaSRam Amrani }
1292c2035eeaSRam Amrani }
1293c2035eeaSRam Amrani
/* Propagate the debug-print module mask and verbosity level to the
 * device structure and to each of its HW-functions.
 */
void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
	u32 idx;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;

	for (idx = 0; idx < MAX_HWFNS_PER_DEVICE; idx++) {
		struct qed_hwfn *hwfn = &cdev->hwfns[idx];

		hwfn->dp_level = dp_level;
		hwfn->dp_module = dp_module;
	}
}
1307fe56b9e6SYuval Mintz
qed_init_struct(struct qed_dev * cdev)1308fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
1309fe56b9e6SYuval Mintz {
1310fe56b9e6SYuval Mintz u8 i;
1311fe56b9e6SYuval Mintz
1312fe56b9e6SYuval Mintz for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
1313fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1314fe56b9e6SYuval Mintz
1315fe56b9e6SYuval Mintz p_hwfn->cdev = cdev;
1316fe56b9e6SYuval Mintz p_hwfn->my_id = i;
1317fe56b9e6SYuval Mintz p_hwfn->b_active = false;
1318fe56b9e6SYuval Mintz
1319fe56b9e6SYuval Mintz mutex_init(&p_hwfn->dmae_info.mutex);
1320fe56b9e6SYuval Mintz }
1321fe56b9e6SYuval Mintz
1322fe56b9e6SYuval Mintz /* hwfn 0 is always active */
1323fe56b9e6SYuval Mintz cdev->hwfns[0].b_active = true;
1324fe56b9e6SYuval Mintz
1325fe56b9e6SYuval Mintz /* set the default cache alignment to 128 */
1326fe56b9e6SYuval Mintz cdev->cache_shift = 7;
1327fe56b9e6SYuval Mintz }
1328fe56b9e6SYuval Mintz
qed_qm_info_free(struct qed_hwfn * p_hwfn)1329fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
1330fe56b9e6SYuval Mintz {
1331fe56b9e6SYuval Mintz struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1332fe56b9e6SYuval Mintz
1333fe56b9e6SYuval Mintz kfree(qm_info->qm_pq_params);
1334fe56b9e6SYuval Mintz qm_info->qm_pq_params = NULL;
1335fe56b9e6SYuval Mintz kfree(qm_info->qm_vport_params);
1336fe56b9e6SYuval Mintz qm_info->qm_vport_params = NULL;
1337fe56b9e6SYuval Mintz kfree(qm_info->qm_port_params);
1338fe56b9e6SYuval Mintz qm_info->qm_port_params = NULL;
1339bcd197c8SManish Chopra kfree(qm_info->wfq_data);
1340bcd197c8SManish Chopra qm_info->wfq_data = NULL;
1341fe56b9e6SYuval Mintz }
1342fe56b9e6SYuval Mintz
/* Free the per-hwfn debug user-data buffer; clearing the pointer guards
 * against double free.
 */
static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->dbg_user_info);
	p_hwfn->dbg_user_info = NULL;
}
1348a3f72307SDenis Bolotin
/* Free all PF-side resources of the device. For a VF only the L2 queue
 * resources are released. The teardown order mirrors the allocations
 * made during resource setup and must not be reordered casually.
 */
void qed_resc_free(struct qed_dev *cdev)
{
	struct qed_rdma_info *rdma_info;
	struct qed_hwfn *p_hwfn;
	int i;

	/* VFs own only their L2 resources */
	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i)
			qed_l2_free(&cdev->hwfns[i]);
		return;
	}

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);
	cdev->reset_stats = NULL;

	qed_llh_free(cdev);

	for_each_hwfn(cdev, i) {
		p_hwfn = cdev->hwfns + i;
		/* Snapshot before qed_rdma_info_free() below clears it */
		rdma_info = p_hwfn->p_rdma_info;

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn);
		qed_consq_free(p_hwfn);
		qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
		qed_ll2_free(p_hwfn);
#endif
		/* Personality-specific storage-offload teardown */
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_free(p_hwfn);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_free(p_hwfn);
			qed_ooo_free(p_hwfn);
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			qed_nvmetcp_free(p_hwfn);
			qed_ooo_free(p_hwfn);
		}

		/* RDMA: drop the async SPQ callback before freeing its info */
		if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
			qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
			qed_rdma_info_free(p_hwfn);
		}

		qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
		qed_iov_free(p_hwfn);
		qed_l2_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn);
		qed_dbg_user_data_free(p_hwfn);
		qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);

		/* Destroy doorbell recovery mechanism */
		qed_db_recovery_teardown(p_hwfn);
	}
}
1412fe56b9e6SYuval Mintz
1413b5a9ee7cSAriel Elior /******************** QM initialization *******************/
1414b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP 0x9f
1415b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP_4PORT_K2 0xf
1416b5a9ee7cSAriel Elior
1417b5a9ee7cSAriel Elior /* determines the physical queue flags for a given PF. */
qed_get_pq_flags(struct qed_hwfn * p_hwfn)1418b5a9ee7cSAriel Elior static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
1419fe56b9e6SYuval Mintz {
1420b5a9ee7cSAriel Elior u32 flags;
1421fe56b9e6SYuval Mintz
1422b5a9ee7cSAriel Elior /* common flags */
1423b5a9ee7cSAriel Elior flags = PQ_FLAGS_LB;
1424fe56b9e6SYuval Mintz
1425b5a9ee7cSAriel Elior /* feature flags */
1426b5a9ee7cSAriel Elior if (IS_QED_SRIOV(p_hwfn->cdev))
1427b5a9ee7cSAriel Elior flags |= PQ_FLAGS_VFS;
1428fe56b9e6SYuval Mintz
1429b5a9ee7cSAriel Elior /* protocol flags */
1430b5a9ee7cSAriel Elior switch (p_hwfn->hw_info.personality) {
1431b5a9ee7cSAriel Elior case QED_PCI_ETH:
1432b5a9ee7cSAriel Elior flags |= PQ_FLAGS_MCOS;
1433b5a9ee7cSAriel Elior break;
1434b5a9ee7cSAriel Elior case QED_PCI_FCOE:
1435b5a9ee7cSAriel Elior flags |= PQ_FLAGS_OFLD;
1436b5a9ee7cSAriel Elior break;
1437b5a9ee7cSAriel Elior case QED_PCI_ISCSI:
1438897e87a1SShai Malin case QED_PCI_NVMETCP:
1439b5a9ee7cSAriel Elior flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
1440b5a9ee7cSAriel Elior break;
1441b5a9ee7cSAriel Elior case QED_PCI_ETH_ROCE:
1442b5a9ee7cSAriel Elior flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
144361be82b0SDenis Bolotin if (IS_QED_MULTI_TC_ROCE(p_hwfn))
144461be82b0SDenis Bolotin flags |= PQ_FLAGS_MTC;
1445b5a9ee7cSAriel Elior break;
144693c45984SKalderon, Michal case QED_PCI_ETH_IWARP:
144793c45984SKalderon, Michal flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
144893c45984SKalderon, Michal PQ_FLAGS_OFLD;
144993c45984SKalderon, Michal break;
1450b5a9ee7cSAriel Elior default:
1451fe56b9e6SYuval Mintz DP_ERR(p_hwfn,
1452b5a9ee7cSAriel Elior "unknown personality %d\n", p_hwfn->hw_info.personality);
1453b5a9ee7cSAriel Elior return 0;
1454fe56b9e6SYuval Mintz }
1455fe56b9e6SYuval Mintz
1456b5a9ee7cSAriel Elior return flags;
1457b5a9ee7cSAriel Elior }
1458b5a9ee7cSAriel Elior
1459b5a9ee7cSAriel Elior /* Getters for resource amounts necessary for qm initialization */
qed_init_qm_get_num_tcs(struct qed_hwfn * p_hwfn)1460bf774d14SYueHaibing static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
1461b5a9ee7cSAriel Elior {
1462b5a9ee7cSAriel Elior return p_hwfn->hw_info.num_hw_tc;
1463b5a9ee7cSAriel Elior }
1464b5a9ee7cSAriel Elior
qed_init_qm_get_num_vfs(struct qed_hwfn * p_hwfn)1465bf774d14SYueHaibing static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
1466b5a9ee7cSAriel Elior {
1467b5a9ee7cSAriel Elior return IS_QED_SRIOV(p_hwfn->cdev) ?
1468b5a9ee7cSAriel Elior p_hwfn->cdev->p_iov_info->total_vfs : 0;
1469b5a9ee7cSAriel Elior }
1470b5a9ee7cSAriel Elior
qed_init_qm_get_num_mtc_tcs(struct qed_hwfn * p_hwfn)147161be82b0SDenis Bolotin static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
147261be82b0SDenis Bolotin {
147361be82b0SDenis Bolotin u32 pq_flags = qed_get_pq_flags(p_hwfn);
147461be82b0SDenis Bolotin
147561be82b0SDenis Bolotin if (!(PQ_FLAGS_MTC & pq_flags))
147661be82b0SDenis Bolotin return 1;
147761be82b0SDenis Bolotin
147861be82b0SDenis Bolotin return qed_init_qm_get_num_tcs(p_hwfn);
147961be82b0SDenis Bolotin }
148061be82b0SDenis Bolotin
1481b5a9ee7cSAriel Elior #define NUM_DEFAULT_RLS 1
1482b5a9ee7cSAriel Elior
qed_init_qm_get_num_pf_rls(struct qed_hwfn * p_hwfn)1483bf774d14SYueHaibing static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
1484b5a9ee7cSAriel Elior {
1485b5a9ee7cSAriel Elior u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
1486b5a9ee7cSAriel Elior
1487b5a9ee7cSAriel Elior /* num RLs can't exceed resource amount of rls or vports */
1488b5a9ee7cSAriel Elior num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
1489b5a9ee7cSAriel Elior RESC_NUM(p_hwfn, QED_VPORT));
1490b5a9ee7cSAriel Elior
1491b5a9ee7cSAriel Elior /* Make sure after we reserve there's something left */
1492b5a9ee7cSAriel Elior if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
1493b5a9ee7cSAriel Elior return 0;
1494b5a9ee7cSAriel Elior
1495b5a9ee7cSAriel Elior /* subtract rls necessary for VFs and one default one for the PF */
1496b5a9ee7cSAriel Elior num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
1497b5a9ee7cSAriel Elior
1498b5a9ee7cSAriel Elior return num_pf_rls;
1499b5a9ee7cSAriel Elior }
1500b5a9ee7cSAriel Elior
qed_init_qm_get_num_vports(struct qed_hwfn * p_hwfn)1501bf774d14SYueHaibing static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
1502b5a9ee7cSAriel Elior {
1503b5a9ee7cSAriel Elior u32 pq_flags = qed_get_pq_flags(p_hwfn);
1504b5a9ee7cSAriel Elior
1505b5a9ee7cSAriel Elior /* all pqs share the same vport, except for vfs and pf_rl pqs */
1506b5a9ee7cSAriel Elior return (!!(PQ_FLAGS_RLS & pq_flags)) *
1507b5a9ee7cSAriel Elior qed_init_qm_get_num_pf_rls(p_hwfn) +
1508b5a9ee7cSAriel Elior (!!(PQ_FLAGS_VFS & pq_flags)) *
1509b5a9ee7cSAriel Elior qed_init_qm_get_num_vfs(p_hwfn) + 1;
1510b5a9ee7cSAriel Elior }
1511b5a9ee7cSAriel Elior
1512b5a9ee7cSAriel Elior /* calc amount of PQs according to the requested flags */
qed_init_qm_get_num_pqs(struct qed_hwfn * p_hwfn)1513bf774d14SYueHaibing static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
1514b5a9ee7cSAriel Elior {
1515b5a9ee7cSAriel Elior u32 pq_flags = qed_get_pq_flags(p_hwfn);
1516b5a9ee7cSAriel Elior
1517b5a9ee7cSAriel Elior return (!!(PQ_FLAGS_RLS & pq_flags)) *
1518b5a9ee7cSAriel Elior qed_init_qm_get_num_pf_rls(p_hwfn) +
1519b5a9ee7cSAriel Elior (!!(PQ_FLAGS_MCOS & pq_flags)) *
1520b5a9ee7cSAriel Elior qed_init_qm_get_num_tcs(p_hwfn) +
1521b5a9ee7cSAriel Elior (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
152261be82b0SDenis Bolotin (!!(PQ_FLAGS_ACK & pq_flags)) +
152361be82b0SDenis Bolotin (!!(PQ_FLAGS_OFLD & pq_flags)) *
152461be82b0SDenis Bolotin qed_init_qm_get_num_mtc_tcs(p_hwfn) +
152561be82b0SDenis Bolotin (!!(PQ_FLAGS_LLT & pq_flags)) *
152661be82b0SDenis Bolotin qed_init_qm_get_num_mtc_tcs(p_hwfn) +
1527b5a9ee7cSAriel Elior (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
1528b5a9ee7cSAriel Elior }
1529b5a9ee7cSAriel Elior
1530b5a9ee7cSAriel Elior /* initialize the top level QM params */
qed_init_qm_params(struct qed_hwfn * p_hwfn)1531b5a9ee7cSAriel Elior static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
1532b5a9ee7cSAriel Elior {
1533b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1534b5a9ee7cSAriel Elior bool four_port;
1535b5a9ee7cSAriel Elior
1536b5a9ee7cSAriel Elior /* pq and vport bases for this PF */
1537b5a9ee7cSAriel Elior qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
1538b5a9ee7cSAriel Elior qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
1539b5a9ee7cSAriel Elior
1540b5a9ee7cSAriel Elior /* rate limiting and weighted fair queueing are always enabled */
1541c7281d59SGustavo A. R. Silva qm_info->vport_rl_en = true;
1542c7281d59SGustavo A. R. Silva qm_info->vport_wfq_en = true;
1543b5a9ee7cSAriel Elior
1544b5a9ee7cSAriel Elior /* TC config is different for AH 4 port */
154578cea9ffSTomer Tayar four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
1546b5a9ee7cSAriel Elior
1547b5a9ee7cSAriel Elior /* in AH 4 port we have fewer TCs per port */
1548b5a9ee7cSAriel Elior qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
1549b5a9ee7cSAriel Elior NUM_OF_PHYS_TCS;
1550b5a9ee7cSAriel Elior
1551b5a9ee7cSAriel Elior /* unless MFW indicated otherwise, ooo_tc == 3 for
1552b5a9ee7cSAriel Elior * AH 4-port and 4 otherwise.
1553fe56b9e6SYuval Mintz */
1554b5a9ee7cSAriel Elior if (!qm_info->ooo_tc)
1555b5a9ee7cSAriel Elior qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
1556b5a9ee7cSAriel Elior DCBX_TCP_OOO_TC;
1557dbb799c3SYuval Mintz }
1558dbb799c3SYuval Mintz
1559b5a9ee7cSAriel Elior /* initialize qm vport params */
qed_init_qm_vport_params(struct qed_hwfn * p_hwfn)1560b5a9ee7cSAriel Elior static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
1561b5a9ee7cSAriel Elior {
1562b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1563b5a9ee7cSAriel Elior u8 i;
1564fe56b9e6SYuval Mintz
1565b5a9ee7cSAriel Elior /* all vports participate in weighted fair queueing */
1566b5a9ee7cSAriel Elior for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
156792fae6fbSMichal Kalderon qm_info->qm_vport_params[i].wfq = 1;
1568fe56b9e6SYuval Mintz }
1569fe56b9e6SYuval Mintz
1570b5a9ee7cSAriel Elior /* initialize qm port params */
qed_init_qm_port_params(struct qed_hwfn * p_hwfn)1571b5a9ee7cSAriel Elior static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
1572b5a9ee7cSAriel Elior {
1573fe56b9e6SYuval Mintz /* Initialize qm port parameters */
157478cea9ffSTomer Tayar u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
15751392d19fSMichal Kalderon struct qed_dev *cdev = p_hwfn->cdev;
1576b5a9ee7cSAriel Elior
1577b5a9ee7cSAriel Elior /* indicate how ooo and high pri traffic is dealt with */
1578b5a9ee7cSAriel Elior active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
1579b5a9ee7cSAriel Elior ACTIVE_TCS_BMAP_4PORT_K2 :
1580b5a9ee7cSAriel Elior ACTIVE_TCS_BMAP;
1581b5a9ee7cSAriel Elior
1582fe56b9e6SYuval Mintz for (i = 0; i < num_ports; i++) {
1583b5a9ee7cSAriel Elior struct init_qm_port_params *p_qm_port =
1584b5a9ee7cSAriel Elior &p_hwfn->qm_info.qm_port_params[i];
15851392d19fSMichal Kalderon u16 pbf_max_cmd_lines;
1586b5a9ee7cSAriel Elior
1587fe56b9e6SYuval Mintz p_qm_port->active = 1;
1588b5a9ee7cSAriel Elior p_qm_port->active_phys_tcs = active_phys_tcs;
15891392d19fSMichal Kalderon pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
15901392d19fSMichal Kalderon p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
15911392d19fSMichal Kalderon p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
1592fe56b9e6SYuval Mintz }
1593b5a9ee7cSAriel Elior }
1594fe56b9e6SYuval Mintz
1595b5a9ee7cSAriel Elior /* Reset the params which must be reset for qm init. QM init may be called as
1596b5a9ee7cSAriel Elior * a result of flows other than driver load (e.g. dcbx renegotiation). Other
1597b5a9ee7cSAriel Elior * params may be affected by the init but would simply recalculate to the same
1598b5a9ee7cSAriel Elior * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
1599b5a9ee7cSAriel Elior * affected as these amounts stay the same.
1600b5a9ee7cSAriel Elior */
qed_init_qm_reset_params(struct qed_hwfn * p_hwfn)1601b5a9ee7cSAriel Elior static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
1602b5a9ee7cSAriel Elior {
1603b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1604fe56b9e6SYuval Mintz
1605b5a9ee7cSAriel Elior qm_info->num_pqs = 0;
1606b5a9ee7cSAriel Elior qm_info->num_vports = 0;
1607b5a9ee7cSAriel Elior qm_info->num_pf_rls = 0;
1608b5a9ee7cSAriel Elior qm_info->num_vf_pqs = 0;
1609b5a9ee7cSAriel Elior qm_info->first_vf_pq = 0;
1610b5a9ee7cSAriel Elior qm_info->first_mcos_pq = 0;
1611b5a9ee7cSAriel Elior qm_info->first_rl_pq = 0;
1612b5a9ee7cSAriel Elior }
1613fe56b9e6SYuval Mintz
qed_init_qm_advance_vport(struct qed_hwfn * p_hwfn)1614b5a9ee7cSAriel Elior static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
1615b5a9ee7cSAriel Elior {
1616b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1617b5a9ee7cSAriel Elior
1618b5a9ee7cSAriel Elior qm_info->num_vports++;
1619b5a9ee7cSAriel Elior
1620b5a9ee7cSAriel Elior if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
1621b5a9ee7cSAriel Elior DP_ERR(p_hwfn,
1622b5a9ee7cSAriel Elior "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
1623b5a9ee7cSAriel Elior qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
1624b5a9ee7cSAriel Elior }
1625b5a9ee7cSAriel Elior
/* initialize a single pq and manage qm_info resources accounting.
 * The pq_init_flags param determines whether the PQ is rate limited
 * (for VF or PF) and whether a new vport is allocated to the pq or not
 * (i.e. vport will be shared).
 */

/* flags for pq init */
#define PQ_INIT_SHARE_VPORT	BIT(0)	/* reuse the current vport */
#define PQ_INIT_PF_RL		BIT(1)	/* PQ is PF rate-limited */
#define PQ_INIT_VF_RL		BIT(2)	/* PQ is VF rate-limited */

/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP	1
#define PQ_INIT_DEFAULT_TC	0
1640c4259ddaSDenis Bolotin
/* Record the offload TC chosen for this function and mark it as
 * explicitly configured.
 */
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
{
	p_info->offload_tc_set = true;
	p_info->offload_tc = tc;
}
1646c4259ddaSDenis Bolotin
qed_is_offload_tc_set(struct qed_hwfn * p_hwfn)1647c4259ddaSDenis Bolotin static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
1648c4259ddaSDenis Bolotin {
1649c4259ddaSDenis Bolotin return p_hwfn->hw_info.offload_tc_set;
1650c4259ddaSDenis Bolotin }
1651c4259ddaSDenis Bolotin
qed_get_offload_tc(struct qed_hwfn * p_hwfn)1652c4259ddaSDenis Bolotin static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
1653c4259ddaSDenis Bolotin {
1654c4259ddaSDenis Bolotin if (qed_is_offload_tc_set(p_hwfn))
1655c4259ddaSDenis Bolotin return p_hwfn->hw_info.offload_tc;
1656c4259ddaSDenis Bolotin
1657c4259ddaSDenis Bolotin return PQ_INIT_DEFAULT_TC;
1658c4259ddaSDenis Bolotin }
1659b5a9ee7cSAriel Elior
/* Initialize a single PQ at the next free index and update the qm_info
 * accounting (num_pqs, num_vports, num_pf_rls). pq_init_flags selects
 * PF/VF rate limiting and whether the PQ shares the current vport.
 * Overflows against the computed budgets are reported but not fatal.
 */
static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
			   struct qed_qm_info *qm_info,
			   u8 tc, u32 pq_init_flags)
{
	u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);

	if (pq_idx > max_pq)
		DP_ERR(p_hwfn,
		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);

	/* init pq params */
	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
	/* new PQs point at the vport currently being filled */
	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
	    qm_info->num_vports;
	qm_info->qm_pq_params[pq_idx].tc_id = tc;
	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
	/* RL is valid for both PF- and VF-rate-limited PQs */
	qm_info->qm_pq_params[pq_idx].rl_valid =
	    (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);

	/* qm params accounting */
	qm_info->num_pqs++;
	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
		qm_info->num_vports++;

	if (pq_init_flags & PQ_INIT_PF_RL)
		qm_info->num_pf_rls++;

	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));

	if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
		DP_ERR(p_hwfn,
		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
		       qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
}
1697b5a9ee7cSAriel Elior
/* get pq index according to PQ_FLAGS */
/* Map a single PQ flag to the qm_info member that stores the base index
 * of that PQ group. Exactly one flag must be set and it must be among
 * the flags active for this PF; otherwise &start_pq is returned as a
 * safe fallback after logging an error.
 */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
					   unsigned long pq_flags)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	/* Can't have multiple flags set here */
	if (bitmap_weight(&pq_flags,
			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
		goto err;
	}

	/* The flag must be part of this PF's active PQ set */
	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
		goto err;
	}

	switch (pq_flags) {
	case PQ_FLAGS_RLS:
		return &qm_info->first_rl_pq;
	case PQ_FLAGS_MCOS:
		return &qm_info->first_mcos_pq;
	case PQ_FLAGS_LB:
		return &qm_info->pure_lb_pq;
	case PQ_FLAGS_OOO:
		return &qm_info->ooo_pq;
	case PQ_FLAGS_ACK:
		return &qm_info->pure_ack_pq;
	case PQ_FLAGS_OFLD:
		return &qm_info->first_ofld_pq;
	case PQ_FLAGS_LLT:
		return &qm_info->first_llt_pq;
	case PQ_FLAGS_VFS:
		return &qm_info->first_vf_pq;
	default:
		goto err;
	}

err:
	return &qm_info->start_pq;
}
1740b5a9ee7cSAriel Elior
1741b5a9ee7cSAriel Elior /* save pq index in qm info */
qed_init_qm_set_idx(struct qed_hwfn * p_hwfn,u32 pq_flags,u16 pq_val)1742b5a9ee7cSAriel Elior static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
1743b5a9ee7cSAriel Elior u32 pq_flags, u16 pq_val)
1744b5a9ee7cSAriel Elior {
1745b5a9ee7cSAriel Elior u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
1746b5a9ee7cSAriel Elior
1747b5a9ee7cSAriel Elior *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
1748b5a9ee7cSAriel Elior }
1749b5a9ee7cSAriel Elior
1750b5a9ee7cSAriel Elior /* get tx pq index, with the PQ TX base already set (ready for context init) */
qed_get_cm_pq_idx(struct qed_hwfn * p_hwfn,u32 pq_flags)1751b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
1752b5a9ee7cSAriel Elior {
1753b5a9ee7cSAriel Elior u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
1754b5a9ee7cSAriel Elior
1755b5a9ee7cSAriel Elior return *base_pq_idx + CM_TX_PQ_BASE;
1756b5a9ee7cSAriel Elior }
1757b5a9ee7cSAriel Elior
qed_get_cm_pq_idx_mcos(struct qed_hwfn * p_hwfn,u8 tc)1758b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
1759b5a9ee7cSAriel Elior {
1760b5a9ee7cSAriel Elior u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
1761b5a9ee7cSAriel Elior
1762eb62cca9SDenis Bolotin if (max_tc == 0) {
1763eb62cca9SDenis Bolotin DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
1764eb62cca9SDenis Bolotin PQ_FLAGS_MCOS);
1765eb62cca9SDenis Bolotin return p_hwfn->qm_info.start_pq;
1766eb62cca9SDenis Bolotin }
1767eb62cca9SDenis Bolotin
1768b5a9ee7cSAriel Elior if (tc > max_tc)
1769b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
1770b5a9ee7cSAriel Elior
1771eb62cca9SDenis Bolotin return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
1772b5a9ee7cSAriel Elior }
1773b5a9ee7cSAriel Elior
/* TX PQ index for VF @vf. Falls back to the PF's start PQ when no VF
 * PQs exist; vf is wrapped into range with a modulo.
 */
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);

	if (!max_vf) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
		       PQ_FLAGS_VFS);
		return p_hwfn->qm_info.start_pq;
	}

	if (vf > max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
1789b5a9ee7cSAriel Elior
qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn * p_hwfn,u8 tc)179061be82b0SDenis Bolotin u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
179161be82b0SDenis Bolotin {
179261be82b0SDenis Bolotin u16 first_ofld_pq, pq_offset;
179361be82b0SDenis Bolotin
179461be82b0SDenis Bolotin first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
179561be82b0SDenis Bolotin pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
179661be82b0SDenis Bolotin tc : PQ_INIT_DEFAULT_TC;
179761be82b0SDenis Bolotin
179861be82b0SDenis Bolotin return first_ofld_pq + pq_offset;
179961be82b0SDenis Bolotin }
180061be82b0SDenis Bolotin
qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn * p_hwfn,u8 tc)180161be82b0SDenis Bolotin u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
180261be82b0SDenis Bolotin {
180361be82b0SDenis Bolotin u16 first_llt_pq, pq_offset;
180461be82b0SDenis Bolotin
180561be82b0SDenis Bolotin first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
180661be82b0SDenis Bolotin pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
180761be82b0SDenis Bolotin tc : PQ_INIT_DEFAULT_TC;
180861be82b0SDenis Bolotin
180961be82b0SDenis Bolotin return first_llt_pq + pq_offset;
181061be82b0SDenis Bolotin }
181161be82b0SDenis Bolotin
1812b5a9ee7cSAriel Elior /* Functions for creating specific types of pqs */
qed_init_qm_lb_pq(struct qed_hwfn * p_hwfn)1813b5a9ee7cSAriel Elior static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
1814b5a9ee7cSAriel Elior {
1815b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1816b5a9ee7cSAriel Elior
1817b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
1818b5a9ee7cSAriel Elior return;
1819b5a9ee7cSAriel Elior
1820b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
1821b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
1822b5a9ee7cSAriel Elior }
1823b5a9ee7cSAriel Elior
qed_init_qm_ooo_pq(struct qed_hwfn * p_hwfn)1824b5a9ee7cSAriel Elior static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
1825b5a9ee7cSAriel Elior {
1826b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1827b5a9ee7cSAriel Elior
1828b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
1829b5a9ee7cSAriel Elior return;
1830b5a9ee7cSAriel Elior
1831b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
1832b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
1833b5a9ee7cSAriel Elior }
1834b5a9ee7cSAriel Elior
qed_init_qm_pure_ack_pq(struct qed_hwfn * p_hwfn)1835b5a9ee7cSAriel Elior static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
1836b5a9ee7cSAriel Elior {
1837b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1838b5a9ee7cSAriel Elior
1839b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
1840b5a9ee7cSAriel Elior return;
1841b5a9ee7cSAriel Elior
1842b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
1843c4259ddaSDenis Bolotin qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
1844c4259ddaSDenis Bolotin PQ_INIT_SHARE_VPORT);
1845b5a9ee7cSAriel Elior }
1846b5a9ee7cSAriel Elior
qed_init_qm_mtc_pqs(struct qed_hwfn * p_hwfn)184761be82b0SDenis Bolotin static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
184861be82b0SDenis Bolotin {
184961be82b0SDenis Bolotin u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
185061be82b0SDenis Bolotin struct qed_qm_info *qm_info = &p_hwfn->qm_info;
185161be82b0SDenis Bolotin u8 tc;
185261be82b0SDenis Bolotin
185361be82b0SDenis Bolotin /* override pq's TC if offload TC is set */
185461be82b0SDenis Bolotin for (tc = 0; tc < num_tcs; tc++)
185561be82b0SDenis Bolotin qed_init_qm_pq(p_hwfn, qm_info,
185661be82b0SDenis Bolotin qed_is_offload_tc_set(p_hwfn) ?
185761be82b0SDenis Bolotin p_hwfn->hw_info.offload_tc : tc,
185861be82b0SDenis Bolotin PQ_INIT_SHARE_VPORT);
185961be82b0SDenis Bolotin }
186061be82b0SDenis Bolotin
qed_init_qm_offload_pq(struct qed_hwfn * p_hwfn)1861b5a9ee7cSAriel Elior static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
1862b5a9ee7cSAriel Elior {
1863b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1864b5a9ee7cSAriel Elior
1865b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
1866b5a9ee7cSAriel Elior return;
1867b5a9ee7cSAriel Elior
1868b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
186961be82b0SDenis Bolotin qed_init_qm_mtc_pqs(p_hwfn);
1870b5a9ee7cSAriel Elior }
1871b5a9ee7cSAriel Elior
qed_init_qm_low_latency_pq(struct qed_hwfn * p_hwfn)1872b5a9ee7cSAriel Elior static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
1873b5a9ee7cSAriel Elior {
1874b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1875b5a9ee7cSAriel Elior
1876b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
1877b5a9ee7cSAriel Elior return;
1878b5a9ee7cSAriel Elior
1879b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
188061be82b0SDenis Bolotin qed_init_qm_mtc_pqs(p_hwfn);
1881b5a9ee7cSAriel Elior }
1882b5a9ee7cSAriel Elior
qed_init_qm_mcos_pqs(struct qed_hwfn * p_hwfn)1883b5a9ee7cSAriel Elior static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
1884b5a9ee7cSAriel Elior {
1885b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1886b5a9ee7cSAriel Elior u8 tc_idx;
1887b5a9ee7cSAriel Elior
1888b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
1889b5a9ee7cSAriel Elior return;
1890b5a9ee7cSAriel Elior
1891b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
1892b5a9ee7cSAriel Elior for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
1893b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
1894b5a9ee7cSAriel Elior }
1895b5a9ee7cSAriel Elior
qed_init_qm_vf_pqs(struct qed_hwfn * p_hwfn)1896b5a9ee7cSAriel Elior static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
1897b5a9ee7cSAriel Elior {
1898b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1899b5a9ee7cSAriel Elior u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
1900b5a9ee7cSAriel Elior
1901b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
1902b5a9ee7cSAriel Elior return;
1903b5a9ee7cSAriel Elior
1904b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
19051408cc1fSYuval Mintz qm_info->num_vf_pqs = num_vfs;
1906b5a9ee7cSAriel Elior for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
1907b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn,
1908b5a9ee7cSAriel Elior qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
1909b5a9ee7cSAriel Elior }
1910fe56b9e6SYuval Mintz
qed_init_qm_rl_pqs(struct qed_hwfn * p_hwfn)1911b5a9ee7cSAriel Elior static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
1912b5a9ee7cSAriel Elior {
1913b5a9ee7cSAriel Elior u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
1914b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1915a64b02d5SManish Chopra
1916b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
1917b5a9ee7cSAriel Elior return;
1918b5a9ee7cSAriel Elior
1919b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
1920b5a9ee7cSAriel Elior for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
1921c4259ddaSDenis Bolotin qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
1922c4259ddaSDenis Bolotin PQ_INIT_PF_RL);
1923b5a9ee7cSAriel Elior }
1924b5a9ee7cSAriel Elior
/* Populate the whole pq table, one pq group at a time. The order below is
 * significant: the FW requires rate-limited pqs first, and all groups up
 * to qed_init_qm_advance_vport() share a single vport.
 */
static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	qed_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	qed_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	qed_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	qed_init_qm_ooo_pq(p_hwfn);

	/* pure ack pq */
	qed_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	qed_init_qm_offload_pq(p_hwfn);

	/* low latency pq */
	qed_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports - VF pqs below get their own vports */
	qed_init_qm_advance_vport(p_hwfn);

	/* pqs for vfs */
	qed_init_qm_vf_pqs(p_hwfn);
}
1954b5a9ee7cSAriel Elior
1955b5a9ee7cSAriel Elior /* compare values of getters against resources amounts */
qed_init_qm_sanity(struct qed_hwfn * p_hwfn)1956b5a9ee7cSAriel Elior static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
1957b5a9ee7cSAriel Elior {
1958b5a9ee7cSAriel Elior if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
1959b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
1960b5a9ee7cSAriel Elior return -EINVAL;
1961b5a9ee7cSAriel Elior }
1962b5a9ee7cSAriel Elior
196361be82b0SDenis Bolotin if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
196461be82b0SDenis Bolotin return 0;
196561be82b0SDenis Bolotin
196661be82b0SDenis Bolotin if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
196782ebc889SJason Yan p_hwfn->hw_info.multi_tc_roce_en = false;
196861be82b0SDenis Bolotin DP_NOTICE(p_hwfn,
196961be82b0SDenis Bolotin "multi-tc roce was disabled to reduce requested amount of pqs\n");
197061be82b0SDenis Bolotin if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
197161be82b0SDenis Bolotin return 0;
1972b5a9ee7cSAriel Elior }
1973fe56b9e6SYuval Mintz
197461be82b0SDenis Bolotin DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
197561be82b0SDenis Bolotin return -EINVAL;
1976b5a9ee7cSAriel Elior }
1977fe56b9e6SYuval Mintz
/* Dump the complete QM configuration - top-level params and the port,
 * vport and pq tables - to the debug log at NETIF_MSG_HW verbosity.
 */
static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_vport_params *vport;
	struct init_qm_port_params *port;
	struct init_qm_pq_params *pq;
	int i, tc;

	/* top level params */
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
		   qm_info->start_pq,
		   qm_info->start_vport,
		   qm_info->pure_lb_pq,
		   qm_info->first_ofld_pq,
		   qm_info->first_llt_pq,
		   qm_info->pure_ack_pq);
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
		   qm_info->ooo_pq,
		   qm_info->first_vf_pq,
		   qm_info->num_pqs,
		   qm_info->num_vf_pqs,
		   qm_info->num_vports, qm_info->max_phys_tcs_per_port);
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
		   qm_info->pf_rl_en,
		   qm_info->pf_wfq_en,
		   qm_info->vport_rl_en,
		   qm_info->vport_wfq_en,
		   qm_info->pf_wfq,
		   qm_info->pf_rl,
		   qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));

	/* port table */
	for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
		port = &(qm_info->qm_port_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
			   i,
			   port->active,
			   port->active_phys_tcs,
			   port->num_pbf_cmd_lines,
			   port->num_btb_blocks, port->reserved);
	}

	/* vport table - the per-TC first_tx_pq_id list is printed piecewise */
	for (i = 0; i < qm_info->num_vports; i++) {
		vport = &(qm_info->qm_vport_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "vport idx %d, wfq %d, first_tx_pq_id [ ",
			   qm_info->start_vport + i, vport->wfq);
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_HW,
				   "%d ", vport->first_tx_pq_id[tc]);
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
	}

	/* pq table */
	for (i = 0; i < qm_info->num_pqs; i++) {
		pq = &(qm_info->qm_pq_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
			   qm_info->start_pq + i,
			   pq->port_id,
			   pq->vport_id,
			   pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
	}
}
2054b5a9ee7cSAriel Elior
/* (Re)build the hwfn's QM database: top-level params, then the port,
 * vport and pq tables, in that order. The qm_info arrays written here
 * must already have been allocated (see qed_alloc_qm_data()).
 */
static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
	/* reset params required for init run */
	qed_init_qm_reset_params(p_hwfn);

	/* init QM top level params */
	qed_init_qm_params(p_hwfn);

	/* init QM port params */
	qed_init_qm_port_params(p_hwfn);

	/* init QM vport params */
	qed_init_qm_vport_params(p_hwfn);

	/* init QM physical queue params */
	qed_init_qm_pq_params(p_hwfn);

	/* display all that init */
	qed_dp_init_qm_params(p_hwfn);
}
2075fe56b9e6SYuval Mintz
207639651abdSSudarsana Reddy Kalluru /* This function reconfigures the QM pf on the fly.
207739651abdSSudarsana Reddy Kalluru * For this purpose we:
207839651abdSSudarsana Reddy Kalluru * 1. reconfigure the QM database
2079a2e7699eSTomer Tayar * 2. set new values to runtime array
208039651abdSSudarsana Reddy Kalluru * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
208139651abdSSudarsana Reddy Kalluru * 4. activate init tool in QM_PF stage
208239651abdSSudarsana Reddy Kalluru * 5. send an sdm_qm_cmd through rbc interface to release the QM
208339651abdSSudarsana Reddy Kalluru */
qed_qm_reconf(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)208439651abdSSudarsana Reddy Kalluru int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
208539651abdSSudarsana Reddy Kalluru {
208639651abdSSudarsana Reddy Kalluru struct qed_qm_info *qm_info = &p_hwfn->qm_info;
208739651abdSSudarsana Reddy Kalluru bool b_rc;
208839651abdSSudarsana Reddy Kalluru int rc;
208939651abdSSudarsana Reddy Kalluru
209039651abdSSudarsana Reddy Kalluru /* initialize qed's qm data structure */
2091b5a9ee7cSAriel Elior qed_init_qm_info(p_hwfn);
209239651abdSSudarsana Reddy Kalluru
209339651abdSSudarsana Reddy Kalluru /* stop PF's qm queues */
209439651abdSSudarsana Reddy Kalluru spin_lock_bh(&qm_lock);
209539651abdSSudarsana Reddy Kalluru b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
209639651abdSSudarsana Reddy Kalluru qm_info->start_pq, qm_info->num_pqs);
209739651abdSSudarsana Reddy Kalluru spin_unlock_bh(&qm_lock);
209839651abdSSudarsana Reddy Kalluru if (!b_rc)
209939651abdSSudarsana Reddy Kalluru return -EINVAL;
210039651abdSSudarsana Reddy Kalluru
210139651abdSSudarsana Reddy Kalluru /* prepare QM portion of runtime array */
2102da090917STomer Tayar qed_qm_init_pf(p_hwfn, p_ptt, false);
210339651abdSSudarsana Reddy Kalluru
210439651abdSSudarsana Reddy Kalluru /* activate init tool on runtime array */
210539651abdSSudarsana Reddy Kalluru rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
210639651abdSSudarsana Reddy Kalluru p_hwfn->hw_info.hw_mode);
210739651abdSSudarsana Reddy Kalluru if (rc)
210839651abdSSudarsana Reddy Kalluru return rc;
210939651abdSSudarsana Reddy Kalluru
211039651abdSSudarsana Reddy Kalluru /* start PF's qm queues */
211139651abdSSudarsana Reddy Kalluru spin_lock_bh(&qm_lock);
211239651abdSSudarsana Reddy Kalluru b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
211339651abdSSudarsana Reddy Kalluru qm_info->start_pq, qm_info->num_pqs);
211439651abdSSudarsana Reddy Kalluru spin_unlock_bh(&qm_lock);
211539651abdSSudarsana Reddy Kalluru if (!b_rc)
211639651abdSSudarsana Reddy Kalluru return -EINVAL;
211739651abdSSudarsana Reddy Kalluru
211839651abdSSudarsana Reddy Kalluru return 0;
211939651abdSSudarsana Reddy Kalluru }
212039651abdSSudarsana Reddy Kalluru
/* Allocate the qm_info tables (pq, vport and port params plus wfq data)
 * after validating that the requested amounts fit the available resources.
 * On any failure all partially-allocated tables are freed and -ENOMEM is
 * returned (a failed sanity check is also reported as -ENOMEM).
 */
static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	int rc;

	/* May also disable multi-tc RoCE to shrink the pq request */
	rc = qed_init_qm_sanity(p_hwfn);
	if (rc)
		goto alloc_err;

	qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
					sizeof(*qm_info->qm_pq_params),
					GFP_KERNEL);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
					   sizeof(*qm_info->qm_vport_params),
					   GFP_KERNEL);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
					  sizeof(*qm_info->qm_port_params),
					  GFP_KERNEL);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
				    sizeof(*qm_info->wfq_data),
				    GFP_KERNEL);
	if (!qm_info->wfq_data)
		goto alloc_err;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
2161b5a9ee7cSAriel Elior
/* Allocate all software resources for the device: per-hwfn context
 * manager, QM data, ILT tables, SPQ/EQ/ConsQ, interrupt and IOV state,
 * and the per-protocol structures matching the hwfn personality. The
 * allocation order below is significant (see inline comments). On any
 * failure everything already allocated is released via qed_resc_free().
 */
int qed_resc_alloc(struct qed_dev *cdev)
{
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	int i, rc = 0;

	/* VFs only own L2 queue structures; all remaining resources below
	 * are PF-only.
	 */
	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i) {
			rc = qed_l2_alloc(&cdev->hwfns[i]);
			if (rc)
				return rc;
		}
		return rc;
	}

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* Initialize the doorbell recovery mechanism */
		rc = qed_db_recovery_setup(p_hwfn);
		if (rc)
			goto alloc_err;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 * Initially request the maximum number of RDMA tasks.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = qed_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		qed_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "too many ILT lines; re-computing with less lines\n");
			/* In case there are not enough ILT lines we reduce the
			 * number of RDMA tasks and re-compute.
			 */
			excess_tasks =
			    qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_err;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			/* Second failure after reduction is fatal */
			rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn,
				       "failed ILT compute. Requested too many lines: %u\n",
				       line_count);

				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The tables sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ - sized per personality; the final value must fit a
		 * u16 chain (checked below).
		 */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
			enum protocol_type rdma_proto;

			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
				rdma_proto = PROTOCOLID_ROCE;
			else
				rdma_proto = PROTOCOLID_IWARP;

			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       rdma_proto,
							       NULL) * 2;
			/* EQ should be able to get events from all SRQ's
			 * at the same time
			 */
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
			   p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_TCP_ULP,
							NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
		if (rc)
			goto alloc_err;

		rc = qed_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_l2_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2) {
			rc = qed_ll2_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif

		/* Per-personality protocol structures */
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			rc = qed_fcoe_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			rc = qed_iscsi_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			rc = qed_nvmetcp_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			rc = qed_rdma_info_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
		if (rc)
			goto alloc_err;
	}

	/* Device-wide (not per-hwfn) allocations */
	rc = qed_llh_alloc(cdev);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to allocate memory for the llh_info structure\n");
		goto alloc_err;
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats)
		goto alloc_no_mem;

	return 0;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}
2379fe56b9e6SYuval Mintz
/* Handle a COMMON_EVENT_FW_ERROR eqe. Only a function-scope error whose
 * entity id falls in the VF range (>= MAX_NUM_PFS) is handled - it is
 * forwarded to the SR-IOV malicious-VF handler. Anything else is logged
 * and rejected with -EINVAL.
 */
static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
			      u8 opcode,
			      u16 echo,
			      union event_ring_data *data, u8 fw_return_code)
{
	if (fw_return_code == COMMON_ERR_CODE_ERROR &&
	    data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
	    le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
		qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
		return 0;
	}

	DP_ERR(p_hwfn,
	       "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
	       opcode, fw_return_code, echo);
	return -EINVAL;
}
2400fe40a830SPrabhakar Kushwaha
/* Dispatch a PROTOCOLID_COMMON eqe to its handler: VF-PF channel and VF
 * FLR events go to SR-IOV, FW errors to qed_fw_err_handler(); anything
 * else is logged and rejected with -EINVAL.
 */
static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
				u8 opcode,
				__le16 echo,
				union event_ring_data *data,
				u8 fw_return_code)
{
	if (opcode == COMMON_EVENT_VF_PF_CHANNEL ||
	    opcode == COMMON_EVENT_VF_FLR)
		return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
					   fw_return_code);

	if (opcode == COMMON_EVENT_FW_ERROR)
		return qed_fw_err_handler(p_hwfn, opcode,
					  le16_to_cpu(echo), data,
					  fw_return_code);

	DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
		opcode, echo);
	return -EINVAL;
}
2422fe40a830SPrabhakar Kushwaha
/* (Re)initialize per-hwfn SW resources previously allocated by
 * qed_resc_alloc(); called on every hw-start so it must be re-entrant
 * across stop/start cycles.
 */
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	/* VFs own only the L2 datapath state; the rest is PF-only */
	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i)
			qed_l2_setup(&cdev->hwfns[i]);
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn);
		qed_consq_setup(p_hwfn);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_l2_setup(p_hwfn);
		qed_iov_setup(p_hwfn);
		/* Route COMMON-protocol EQ completions (VF messages/FLR,
		 * FW errors) through qed_common_eqe_event().
		 */
		qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
					  qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2)
			qed_ll2_setup(p_hwfn);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_setup(p_hwfn);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_setup(p_hwfn);
			qed_ooo_setup(p_hwfn);
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			qed_nvmetcp_setup(p_hwfn);
			qed_ooo_setup(p_hwfn);
		}
	}
}
2471fe56b9e6SYuval Mintz
#define FINAL_CLEANUP_POLL_CNT (100)	/* max completion polls */
#define FINAL_CLEANUP_POLL_TIME (10)	/* ms slept between polls */
/* Request FW "final cleanup" for function @id and wait for its ack.
 *
 * Builds an SDM aggregated-interrupt command, issues it through
 * XSDM_REG_OPERATION_GEN and polls the per-PF USTORM FLR final-ack RAM
 * location for up to FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME ms.
 *
 * @id: function to clean up; relative VF id when @is_vf is true.
 *
 * Return: 0 when the FW notification arrived, -EBUSY on timeout.
 */
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
				USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
	/* NOTE(review): VF ids are offset by 0x10 in the cleanup command's
	 * vector-bit encoding - presumably past the PF id range; confirm
	 * against FW interface docs.
	 */
	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
2519fe56b9e6SYuval Mintz
qed_calc_hw_mode(struct qed_hwfn * p_hwfn)25209c79ddaaSMintz, Yuval static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
2521fe56b9e6SYuval Mintz {
2522fe56b9e6SYuval Mintz int hw_mode = 0;
2523fe56b9e6SYuval Mintz
25249c79ddaaSMintz, Yuval if (QED_IS_BB_B0(p_hwfn->cdev)) {
25259c79ddaaSMintz, Yuval hw_mode |= 1 << MODE_BB;
25269c79ddaaSMintz, Yuval } else if (QED_IS_AH(p_hwfn->cdev)) {
25279c79ddaaSMintz, Yuval hw_mode |= 1 << MODE_K2;
25289c79ddaaSMintz, Yuval } else {
25299c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
25309c79ddaaSMintz, Yuval p_hwfn->cdev->type);
25319c79ddaaSMintz, Yuval return -EINVAL;
25329c79ddaaSMintz, Yuval }
2533fe56b9e6SYuval Mintz
253478cea9ffSTomer Tayar switch (p_hwfn->cdev->num_ports_in_engine) {
2535fe56b9e6SYuval Mintz case 1:
2536fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
2537fe56b9e6SYuval Mintz break;
2538fe56b9e6SYuval Mintz case 2:
2539fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
2540fe56b9e6SYuval Mintz break;
2541fe56b9e6SYuval Mintz case 4:
2542fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
2543fe56b9e6SYuval Mintz break;
2544fe56b9e6SYuval Mintz default:
2545fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
254678cea9ffSTomer Tayar p_hwfn->cdev->num_ports_in_engine);
25479c79ddaaSMintz, Yuval return -EINVAL;
2548fe56b9e6SYuval Mintz }
2549fe56b9e6SYuval Mintz
25500bc5fe85SSudarsana Reddy Kalluru if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
2551fc48b7a6SYuval Mintz hw_mode |= 1 << MODE_MF_SD;
25520bc5fe85SSudarsana Reddy Kalluru else
2553fc48b7a6SYuval Mintz hw_mode |= 1 << MODE_MF_SI;
2554fe56b9e6SYuval Mintz
2555fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_ASIC;
2556fe56b9e6SYuval Mintz
25571af9dcf7SYuval Mintz if (p_hwfn->cdev->num_hwfns > 1)
25581af9dcf7SYuval Mintz hw_mode |= 1 << MODE_100G;
25591af9dcf7SYuval Mintz
2560fe56b9e6SYuval Mintz p_hwfn->hw_info.hw_mode = hw_mode;
25611af9dcf7SYuval Mintz
25621af9dcf7SYuval Mintz DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
25631af9dcf7SYuval Mintz "Configuring function for hw_mode: 0x%08x\n",
25641af9dcf7SYuval Mintz p_hwfn->hw_info.hw_mode);
25659c79ddaaSMintz, Yuval
25669c79ddaaSMintz, Yuval return 0;
2567fe56b9e6SYuval Mintz }
2568fe56b9e6SYuval Mintz
2569fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
qed_init_cau_rt_data(struct qed_dev * cdev)2570fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
2571fe56b9e6SYuval Mintz {
2572fe56b9e6SYuval Mintz u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
2573d031548eSMintz, Yuval int i, igu_sb_id;
2574fe56b9e6SYuval Mintz
2575fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) {
2576fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2577fe56b9e6SYuval Mintz struct qed_igu_info *p_igu_info;
2578fe56b9e6SYuval Mintz struct qed_igu_block *p_block;
2579fe56b9e6SYuval Mintz struct cau_sb_entry sb_entry;
2580fe56b9e6SYuval Mintz
2581fe56b9e6SYuval Mintz p_igu_info = p_hwfn->hw_info.p_igu_info;
2582fe56b9e6SYuval Mintz
2583d031548eSMintz, Yuval for (igu_sb_id = 0;
2584d031548eSMintz, Yuval igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
2585d031548eSMintz, Yuval p_block = &p_igu_info->entry[igu_sb_id];
2586d031548eSMintz, Yuval
2587fe56b9e6SYuval Mintz if (!p_block->is_pf)
2588fe56b9e6SYuval Mintz continue;
2589fe56b9e6SYuval Mintz
2590fe56b9e6SYuval Mintz qed_init_cau_sb_entry(p_hwfn, &sb_entry,
25911a635e48SYuval Mintz p_block->function_id, 0, 0);
2592d031548eSMintz, Yuval STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
2593d031548eSMintz, Yuval sb_entry);
2594fe56b9e6SYuval Mintz }
2595fe56b9e6SYuval Mintz }
2596fe56b9e6SYuval Mintz }
2597fe56b9e6SYuval Mintz
qed_init_cache_line_size(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)259860afed72STomer Tayar static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
259960afed72STomer Tayar struct qed_ptt *p_ptt)
260060afed72STomer Tayar {
260160afed72STomer Tayar u32 val, wr_mbs, cache_line_size;
260260afed72STomer Tayar
260360afed72STomer Tayar val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
260460afed72STomer Tayar switch (val) {
260560afed72STomer Tayar case 0:
260660afed72STomer Tayar wr_mbs = 128;
260760afed72STomer Tayar break;
260860afed72STomer Tayar case 1:
260960afed72STomer Tayar wr_mbs = 256;
261060afed72STomer Tayar break;
261160afed72STomer Tayar case 2:
261260afed72STomer Tayar wr_mbs = 512;
261360afed72STomer Tayar break;
261460afed72STomer Tayar default:
261560afed72STomer Tayar DP_INFO(p_hwfn,
261660afed72STomer Tayar "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
261760afed72STomer Tayar val);
261860afed72STomer Tayar return;
261960afed72STomer Tayar }
262060afed72STomer Tayar
262160afed72STomer Tayar cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
262260afed72STomer Tayar switch (cache_line_size) {
262360afed72STomer Tayar case 32:
262460afed72STomer Tayar val = 0;
262560afed72STomer Tayar break;
262660afed72STomer Tayar case 64:
262760afed72STomer Tayar val = 1;
262860afed72STomer Tayar break;
262960afed72STomer Tayar case 128:
263060afed72STomer Tayar val = 2;
263160afed72STomer Tayar break;
263260afed72STomer Tayar case 256:
263360afed72STomer Tayar val = 3;
263460afed72STomer Tayar break;
263560afed72STomer Tayar default:
263660afed72STomer Tayar DP_INFO(p_hwfn,
263760afed72STomer Tayar "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
263860afed72STomer Tayar cache_line_size);
263960afed72STomer Tayar }
264060afed72STomer Tayar
2641fe40a830SPrabhakar Kushwaha if (wr_mbs < L1_CACHE_BYTES)
264260afed72STomer Tayar DP_INFO(p_hwfn,
264360afed72STomer Tayar "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
264460afed72STomer Tayar L1_CACHE_BYTES, wr_mbs);
264560afed72STomer Tayar
264660afed72STomer Tayar STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
2647fc6575bcSMintz, Yuval if (val > 0) {
2648fc6575bcSMintz, Yuval STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
2649fc6575bcSMintz, Yuval STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
2650fc6575bcSMintz, Yuval }
265160afed72STomer Tayar }
265260afed72STomer Tayar
/* Engine-phase HW init (PHASE_ENGINE): programs CAU runtime data, GTT
 * windows, common QM runtime config and cache-line size, runs the
 * init-tool engine phase, then clears RoCE/TCP searcher bits per PF (BB
 * only) and sets strong/weak CFC enables for every possible VF.
 *
 * Return: 0 on success, -ENOMEM if the QM params cannot be allocated,
 * or the init-tool error code.
 */
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params *params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	int rc = 0;

	/* Params struct is heap-allocated rather than placed on the stack */
	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to allocate common init params\n");

		return -ENOMEM;
	}

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	/* Enable PF RL/WFQ only if MFW supplied bandwidth limits */
	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = true;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = true;
	}

	params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
	params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params->pf_rl_en = qm_info->pf_rl_en;
	params->pf_wfq_en = qm_info->pf_wfq_en;
	params->global_rl_en = qm_info->vport_rl_en;
	params->vport_wfq_en = qm_info->vport_wfq_en;
	params->port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, params);

	qed_cxt_hw_init_common(p_hwfn);

	qed_init_cache_line_size(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		goto out;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (QED_IS_BB(p_hwfn->cdev)) {
		/* Pretend to be each engine PF in turn to zero its
		 * RoCE/TCP searcher registers.
		 */
		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	/* Set strong/weak CFC enables for each possible VF of the engine */
	max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

out:
	kfree(params);

	return rc;
}
2733fe56b9e6SYuval Mintz
273451ff1725SRam Amrani static int
qed_hw_init_dpi_size(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 pwm_region_size,u32 n_cpus)273551ff1725SRam Amrani qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
273651ff1725SRam Amrani struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
273751ff1725SRam Amrani {
2738107392b7SRam Amrani u32 dpi_bit_shift, dpi_count, dpi_page_size;
273951ff1725SRam Amrani u32 min_dpis;
2740107392b7SRam Amrani u32 n_wids;
274151ff1725SRam Amrani
274251ff1725SRam Amrani /* Calculate DPI size */
2743107392b7SRam Amrani n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
2744107392b7SRam Amrani dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
2745107392b7SRam Amrani dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
274651ff1725SRam Amrani dpi_bit_shift = ilog2(dpi_page_size / 4096);
274751ff1725SRam Amrani dpi_count = pwm_region_size / dpi_page_size;
274851ff1725SRam Amrani
274951ff1725SRam Amrani min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
275051ff1725SRam Amrani min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
275151ff1725SRam Amrani
275251ff1725SRam Amrani p_hwfn->dpi_size = dpi_page_size;
275351ff1725SRam Amrani p_hwfn->dpi_count = dpi_count;
275451ff1725SRam Amrani
275551ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
275651ff1725SRam Amrani
275751ff1725SRam Amrani if (dpi_count < min_dpis)
275851ff1725SRam Amrani return -EINVAL;
275951ff1725SRam Amrani
276051ff1725SRam Amrani return 0;
276151ff1725SRam Amrani }
276251ff1725SRam Amrani
/* User-requested EDPM policy (rdma_pf_params.roce_edpm_mode), consumed
 * by qed_hw_init_pf_doorbell_bar().
 */
enum QED_ROCE_EDPM_MODE {
	QED_ROCE_EDPM_MODE_ENABLE = 0,	/* use EDPM if a WID per CPU fits */
	QED_ROCE_EDPM_MODE_FORCE_ON = 1,	/* EDPM is mandatory */
	QED_ROCE_EDPM_MODE_DISABLE = 2,	/* never use EDPM */
};
276851ff1725SRam Amrani
qed_edpm_enabled(struct qed_hwfn * p_hwfn)2769a1b469b8SAriel Elior bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
2770a1b469b8SAriel Elior {
2771a1b469b8SAriel Elior if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
2772a1b469b8SAriel Elior return false;
2773a1b469b8SAriel Elior
2774a1b469b8SAriel Elior return true;
2775a1b469b8SAriel Elior }
2776a1b469b8SAriel Elior
/* Partition the doorbell BAR (BAR1) into a "normal" region - one
 * DEMS-sized doorbell per non-PWM connection (CORE + ETH CIDs) - and a
 * PWM region carved into DPIs, then program the DORQ layout registers.
 *
 * Return: 0 on success; -EINVAL if the BAR is too small or too few DPIs
 * could be carved out of the PWM region.
 */
static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus = 1;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	int rc = 0;
	u8 cond;

	db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
	/* In CMT the doorbell BAR is split between the two functions */
	if (p_hwfn->cdev->num_hwfns > 1)
		db_bar_size /= 2;

	/* Calculate doorbell regions */
	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						   NULL) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						   NULL);
	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->cdev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
		       db_bar_size, norm_regsize);
		return -EINVAL;
	}

	if (pwm_regsize < QED_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->cdev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize,
		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
		return -EINVAL;
	}

	/* Calculate number of DPIs */
	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
	    ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
		/* Either EDPM is mandatory, or we are attempting to allocate a
		 * WID per CPU.
		 */
		n_cpus = num_present_cpus();
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocated a WID per CPU.
		 */
		n_cpus = 1;
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

		if (cond)
			qed_rdma_dpm_bar(p_hwfn, p_ptt);
	}

	p_hwfn->wid_count = (u16)n_cpus;

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
		norm_regsize,
		pwm_regsize,
		p_hwfn->dpi_size,
		p_hwfn->dpi_count,
		(!qed_edpm_enabled(p_hwfn)) ?
		"disabled" : "enabled", PAGE_SIZE);

	/* Note: in FORCE_ON mode a sizing failure is fatal here, making
	 * EDPM effectively mandatory.
	 */
	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
		       p_hwfn->dpi_count,
		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
		return -EINVAL;
	}

	p_hwfn->dpi_start_offset = norm_regsize;

	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return 0;
}
287151ff1725SRam Amrani
qed_hw_init_port(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,int hw_mode)2872fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
28731a635e48SYuval Mintz struct qed_ptt *p_ptt, int hw_mode)
2874fe56b9e6SYuval Mintz {
2875fc6575bcSMintz, Yuval int rc = 0;
2876fc6575bcSMintz, Yuval
287779284adeSMichal Kalderon /* In CMT the gate should be cleared by the 2nd hwfn */
287879284adeSMichal Kalderon if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn))
287979284adeSMichal Kalderon STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
288079284adeSMichal Kalderon
2881fc6575bcSMintz, Yuval rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
2882fc6575bcSMintz, Yuval if (rc)
2883fc6575bcSMintz, Yuval return rc;
2884fc6575bcSMintz, Yuval
2885fc6575bcSMintz, Yuval qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
2886fc6575bcSMintz, Yuval
2887fc6575bcSMintz, Yuval return 0;
2888fe56b9e6SYuval Mintz }
2889fe56b9e6SYuval Mintz
/* PF-phase HW init (PHASE_PF): programs per-PF context, IGU runtime
 * data, MF classification (VLAN or MAC), protocol searcher bits and the
 * doorbell BAR, then optionally enables interrupts and sends the
 * function-start ramrod.
 *
 * @p_tunn: initial tunnelling config for the start ramrod (may be NULL).
 * @b_hw_start: when true, enable interrupts and start the function.
 * @int_mode: interrupt mode to enable when starting.
 * @allow_npar_tx_switch: passed through to the start ramrod.
 *
 * Return: 0 on success or a negative errno from any init step.
 */
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunnel_info *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn, p_ptt);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & BIT(MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
			     1);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & BIT(MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration: enable the TCP/FCoE searchers only for
	 * the matching personality; RoCE search is enabled later by the
	 * RDMA code.
	 */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) ||
		      (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Sanity check before the PF init sequence that uses DMAE */
	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc)
		return rc;

	/* Use the leading hwfn since in CMT only NIG #0 is operational */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_llh_hw_init_pf(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
				     allow_npar_tx_switch);
		if (rc) {
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
			return rc;
		}
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			/* FCoE-specific PRS tweaks; NOTE(review): exact
			 * register semantics not visible here - confirm
			 * against the HW register documentation.
			 */
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
			qed_wr(p_hwfn, p_ptt,
			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
			       0x100);
		}
	}
	return rc;
}
2996fe56b9e6SYuval Mintz
/* Set or clear the PF's internal FID_enable in PGLUE_B for master
 * transactions, then poll until the HW reflects the new value.
 *
 * Return: 0 on success, -EAGAIN if the value did not take within ~1s.
 */
int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, bool b_enable)
{
	u32 set_val = b_enable ? 1 : 0;
	u32 tries = 20000;
	u32 val;

	/* Configure the PF's internal FID_enable for master transactions */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* Wait until value is set - try for 1 second every 50us */
	do {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			return 0;

		usleep_range(50, 60);
	} while (--tries);

	DP_NOTICE(p_hwfn,
		  "PFID_ENABLE_MASTER wasn't changed after a second\n");
	return -EAGAIN;
}
3023fe56b9e6SYuval Mintz
qed_reset_mb_shadow(struct qed_hwfn * p_hwfn,struct qed_ptt * p_main_ptt)3024fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
3025fe56b9e6SYuval Mintz struct qed_ptt *p_main_ptt)
3026fe56b9e6SYuval Mintz {
3027fe56b9e6SYuval Mintz /* Read shadow of current MFW mailbox */
3028fe56b9e6SYuval Mintz qed_mcp_read_mb(p_hwfn, p_main_ptt);
3029fe56b9e6SYuval Mintz memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
30301a635e48SYuval Mintz p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
3031fe56b9e6SYuval Mintz }
3032fe56b9e6SYuval Mintz
30335d24bcf1STomer Tayar static void
qed_fill_load_req_params(struct qed_load_req_params * p_load_req,struct qed_drv_load_params * p_drv_load)30345d24bcf1STomer Tayar qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
30355d24bcf1STomer Tayar struct qed_drv_load_params *p_drv_load)
30365d24bcf1STomer Tayar {
30375d24bcf1STomer Tayar memset(p_load_req, 0, sizeof(*p_load_req));
30385d24bcf1STomer Tayar
30395d24bcf1STomer Tayar p_load_req->drv_role = p_drv_load->is_crash_kernel ?
30405d24bcf1STomer Tayar QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
30415d24bcf1STomer Tayar p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
30425d24bcf1STomer Tayar p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
30435d24bcf1STomer Tayar p_load_req->override_force_load = p_drv_load->override_force_load;
30445d24bcf1STomer Tayar }
30455d24bcf1STomer Tayar
qed_vf_start(struct qed_hwfn * p_hwfn,struct qed_hw_init_params * p_params)3046eaf3c0c6SChopra, Manish static int qed_vf_start(struct qed_hwfn *p_hwfn,
3047eaf3c0c6SChopra, Manish struct qed_hw_init_params *p_params)
3048eaf3c0c6SChopra, Manish {
3049eaf3c0c6SChopra, Manish if (p_params->p_tunn) {
3050eaf3c0c6SChopra, Manish qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
3051eaf3c0c6SChopra, Manish qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
3052eaf3c0c6SChopra, Manish }
3053eaf3c0c6SChopra, Manish
3054c7281d59SGustavo A. R. Silva p_hwfn->b_int_enabled = true;
3055eaf3c0c6SChopra, Manish
3056eaf3c0c6SChopra, Manish return 0;
3057eaf3c0c6SChopra, Manish }
3058eaf3c0c6SChopra, Manish
qed_pglueb_clear_err(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)3059666db486STomer Tayar static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3060666db486STomer Tayar {
3061666db486STomer Tayar qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
3062666db486STomer Tayar BIT(p_hwfn->abs_pf_id));
3063666db486STomer Tayar }
3064666db486STomer Tayar
/* Bring up the HW and FW for all hw-functions of the device.
 *
 * For each hwfn: negotiate a load with the MFW (LOAD_REQ), clean up any
 * remains of a previous driver, enable the PF in the PXP, and run the
 * engine/port/function init phases according to the load code the MFW
 * returned. After all hwfns are up, the leading PF reports OEM/FW-version/
 * MTU/driver-state information back to the management FW.
 *
 * Returns 0 on success or a negative errno. On an init-phase failure the
 * MFW load lock is released via the load_err path.
 */
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
	struct qed_load_req_params load_req_params;
	u32 load_code, resp, param, drv_mb_param;
	bool b_default_mtu = true;
	struct qed_hwfn *p_hwfn;
	const u32 *fw_overlays;
	u32 fw_overlays_len;
	u16 ether_type;
	int rc = 0, i;

	/* MSI (as opposed to MSI-X) cannot serve multi-hwfn (CMT) devices */
	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
		if (rc)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		p_hwfn = &cdev->hwfns[i];

		/* If management didn't provide a default, set one of our own */
		if (!p_hwfn->hw_info.mtu) {
			p_hwfn->hw_info.mtu = 1500;
			b_default_mtu = false;
		}

		/* VFs have a much shorter init flow */
		if (IS_VF(cdev)) {
			qed_vf_start(p_hwfn, p_params);
			continue;
		}

		/* Some flows may keep variable set */
		p_hwfn->mcp_info->mcp_handling_status = 0;

		rc = qed_calc_hw_mode(p_hwfn);
		if (rc)
			return rc;

		/* In 802.1Q/802.1AD multi-function modes, program the tag
		 * ethertype into the relevant runtime registers.
		 */
		if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
					     &cdev->mf_bits) ||
				    test_bit(QED_MF_8021AD_TAGGING,
					     &cdev->mf_bits))) {
			if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
				ether_type = ETH_P_8021Q;
			else
				ether_type = ETH_P_8021AD;
			STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
				     ether_type);
		}

		qed_fill_load_req_params(&load_req_params,
					 p_params->p_drv_load_params);
		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_req_params);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
			return rc;
		}

		load_code = load_req_params.load_code;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Load code: 0x%x\n",
			   load_code);

		/* Only relevant for recovery:
		 * Clear the indication after LOAD_REQ is responded by the MFW.
		 */
		cdev->recov_in_prog = false;

		qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		/* Clean up chip from previous driver if such remains exist.
		 * This is not needed when the PF is the first one on the
		 * engine, since afterwards we are going to init the FW.
		 */
		if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
			rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
					       p_hwfn->rel_pf_id, false);
			if (rc) {
				qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
						  QED_HW_ERR_RAMROD_FAIL,
						  "Final cleanup failed\n");
				goto load_err;
			}
		}

		/* Log and clear previous pglue_b errors if such exist */
		qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);

		/* Enable the PF's internal FID_enable in the PXP */
		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
						true);
		if (rc)
			goto load_err;

		/* Clear the pglue_b was_error indication.
		 * In E4 it must be done after the BME and the internal
		 * FID_enable for the PF are set, since VDMs may cause the
		 * indication to be set again.
		 */
		qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);

		fw_overlays = cdev->fw_data->fw_overlays;
		fw_overlays_len = cdev->fw_data->fw_overlays_len;
		p_hwfn->fw_overlay_mem =
		    qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
					     fw_overlays_len);
		if (!p_hwfn->fw_overlay_mem) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate fw overlay memory\n");
			rc = -ENOMEM;
			goto load_err;
		}

		/* The load code tells how much of the chip this PF must
		 * initialize; the cases deliberately fall through from the
		 * widest scope (engine) to the narrowest (function).
		 */
		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
			fallthrough;
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

			fallthrough;
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_params->p_tunn,
					    p_hwfn->hw_info.hw_mode,
					    p_params->b_hw_start,
					    p_params->int_mode,
					    p_params->allow_npar_tx_switch);
			break;
		default:
			DP_NOTICE(p_hwfn,
				  "Unexpected load code [0x%08x]", load_code);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);
			goto load_err;
		}

		/* After this point the load is done; failures below return
		 * directly instead of jumping to load_err.
		 */
		rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			return rc;

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_SET_DCBX,
				 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				 &resp, &param);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return rc;
		}

		p_hwfn->hw_init_done = true;
	}

	/* Post-init reporting to the management FW, leading PF only */
	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);

		/* Get pre-negotiated values for stag, bandwidth etc. */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
		drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_GET_OEM_UPDATES,
				 drv_mb_param, &resp, &param);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to send GET_OEM_UPDATES attention request\n");

		drv_mb_param = STORM_FW_VERSION;
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
				 drv_mb_param, &load_code, &param);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update firmware version\n");

		/* Only report an MTU the MFW didn't already know about */
		if (!b_default_mtu) {
			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
						   p_hwfn->hw_info.mtu);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to update default mtu\n");
		}

		rc = qed_mcp_ov_update_driver_state(p_hwfn,
						    p_hwfn->p_main_ptt,
						    QED_OV_DRIVER_STATE_DISABLED);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update driver state\n");

		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_ESWITCH_NONE);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
	}

	return 0;

load_err:
	/* The MFW load lock should be released also when initialization fails.
	 */
	qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
	return rc;
}
3299fe56b9e6SYuval Mintz
3300fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
qed_hw_timers_stop(struct qed_dev * cdev,struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)33011a635e48SYuval Mintz static void qed_hw_timers_stop(struct qed_dev *cdev,
33021a635e48SYuval Mintz struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
33038c925c44SYuval Mintz {
33048c925c44SYuval Mintz int i;
33058c925c44SYuval Mintz
33068c925c44SYuval Mintz /* close timers */
33078c925c44SYuval Mintz qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
33088c925c44SYuval Mintz qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
33098c925c44SYuval Mintz
331064515dc8STomer Tayar if (cdev->recov_in_prog)
331164515dc8STomer Tayar return;
331264515dc8STomer Tayar
33138c925c44SYuval Mintz for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
33148c925c44SYuval Mintz if ((!qed_rd(p_hwfn, p_ptt,
33158c925c44SYuval Mintz TM_REG_PF_SCAN_ACTIVE_CONN)) &&
33161a635e48SYuval Mintz (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
33178c925c44SYuval Mintz break;
33188c925c44SYuval Mintz
33198c925c44SYuval Mintz /* Dependent on number of connection/tasks, possibly
33208c925c44SYuval Mintz * 1ms sleep is required between polls
33218c925c44SYuval Mintz */
33228c925c44SYuval Mintz usleep_range(1000, 2000);
33238c925c44SYuval Mintz }
33248c925c44SYuval Mintz
33258c925c44SYuval Mintz if (i < QED_HW_STOP_RETRY_LIMIT)
33268c925c44SYuval Mintz return;
33278c925c44SYuval Mintz
33288c925c44SYuval Mintz DP_NOTICE(p_hwfn,
33298c925c44SYuval Mintz "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
33308c925c44SYuval Mintz (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
33318c925c44SYuval Mintz (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
33328c925c44SYuval Mintz }
33338c925c44SYuval Mintz
qed_hw_timers_stop_all(struct qed_dev * cdev)33348c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev)
33358c925c44SYuval Mintz {
33368c925c44SYuval Mintz int j;
33378c925c44SYuval Mintz
33388c925c44SYuval Mintz for_each_hwfn(cdev, j) {
33398c925c44SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
33408c925c44SYuval Mintz struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
33418c925c44SYuval Mintz
33428c925c44SYuval Mintz qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
33438c925c44SYuval Mintz }
33448c925c44SYuval Mintz }
33458c925c44SYuval Mintz
/* Stop the HW and FW on all hw-functions of the device.
 *
 * For VFs this is just an interrupt cleanup plus a PF-mediated reset.
 * For PFs: notify the MFW (UNLOAD_REQ), close the PF against the FW,
 * gate traffic and disable the PF in the HW blocks, then complete the
 * unload handshake (UNLOAD_DONE). Finally the leading PF clears its
 * internal FID_enable in the PXP.
 *
 * Failures are collected into rc2 so the teardown continues as far as
 * possible; returns 0 only if every step succeeded.
 */
int qed_hw_stop(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int rc, rc2 = 0;
	int j;

	for_each_hwfn(cdev, j) {
		p_hwfn = &cdev->hwfns[j];
		p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "qed_vf_pf_reset failed. rc = %d.\n",
					  rc);
				rc2 = -EINVAL;
			}
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		/* Send unload command to MCP */
		if (!cdev->recov_in_prog) {
			rc = qed_mcp_unload_req(p_hwfn, p_ptt);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed sending a UNLOAD_REQ command. rc = %d.\n",
					  rc);
				rc2 = -EINVAL;
			}
		}

		qed_slowpath_irq_sync(p_hwfn);

		/* After this point no MFW attentions are expected, e.g. prevent
		 * race between pf stop and dcbx pf update.
		 */
		rc = qed_sp_pf_stop(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
				  rc);
			rc2 = -EINVAL;
		}

		/* Gate incoming traffic and close the parser searchers */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);

		if (IS_LEAD_HWFN(p_hwfn) &&
		    test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
		    !QED_IS_FCOE_PERSONALITY(p_hwfn))
			qed_llh_remove_mac_filter(cdev, 0,
						  p_hwfn->hw_info.hw_mac_addr);

		if (!cdev->recov_in_prog) {
			rc = qed_mcp_unload_done(p_hwfn, p_ptt);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
					  rc);
				rc2 = -EINVAL;
			}
		}
	}

	if (IS_PF(cdev) && !cdev->recov_in_prog) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;

		/* Clear the PF's internal FID_enable in the PXP.
		 * In CMT this should only be done for first hw-function, and
		 * only after all transactions have stopped for all active
		 * hw-functions.
		 */
		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
				  rc);
			rc2 = -EINVAL;
		}
	}

	return rc2;
}
3461fe56b9e6SYuval Mintz
qed_hw_stop_fastpath(struct qed_dev * cdev)346215582962SRahul Verma int qed_hw_stop_fastpath(struct qed_dev *cdev)
3463cee4d264SManish Chopra {
34648c925c44SYuval Mintz int j;
3465cee4d264SManish Chopra
3466cee4d264SManish Chopra for_each_hwfn(cdev, j) {
3467cee4d264SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
346815582962SRahul Verma struct qed_ptt *p_ptt;
3469cee4d264SManish Chopra
3470dacd88d6SYuval Mintz if (IS_VF(cdev)) {
3471dacd88d6SYuval Mintz qed_vf_pf_int_cleanup(p_hwfn);
3472dacd88d6SYuval Mintz continue;
3473dacd88d6SYuval Mintz }
347415582962SRahul Verma p_ptt = qed_ptt_acquire(p_hwfn);
347515582962SRahul Verma if (!p_ptt)
347615582962SRahul Verma return -EAGAIN;
3477dacd88d6SYuval Mintz
3478cee4d264SManish Chopra DP_VERBOSE(p_hwfn,
34791a635e48SYuval Mintz NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
3480cee4d264SManish Chopra
3481cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt,
3482cee4d264SManish Chopra NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
3483cee4d264SManish Chopra
3484cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
3485cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
3486cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
3487cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
3488cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
3489cee4d264SManish Chopra
3490cee4d264SManish Chopra qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
3491cee4d264SManish Chopra
3492cee4d264SManish Chopra /* Need to wait 1ms to guarantee SBs are cleared */
3493cee4d264SManish Chopra usleep_range(1000, 2000);
349415582962SRahul Verma qed_ptt_release(p_hwfn, p_ptt);
3495cee4d264SManish Chopra }
3496cee4d264SManish Chopra
349715582962SRahul Verma return 0;
349815582962SRahul Verma }
349915582962SRahul Verma
qed_hw_start_fastpath(struct qed_hwfn * p_hwfn)350015582962SRahul Verma int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
3501cee4d264SManish Chopra {
350215582962SRahul Verma struct qed_ptt *p_ptt;
350315582962SRahul Verma
3504dacd88d6SYuval Mintz if (IS_VF(p_hwfn->cdev))
350515582962SRahul Verma return 0;
350615582962SRahul Verma
350715582962SRahul Verma p_ptt = qed_ptt_acquire(p_hwfn);
350815582962SRahul Verma if (!p_ptt)
350915582962SRahul Verma return -EAGAIN;
3510dacd88d6SYuval Mintz
3511f855df22SMichal Kalderon if (p_hwfn->p_rdma_info &&
3512291d57f6SMichal Kalderon p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
3513f855df22SMichal Kalderon qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
3514f855df22SMichal Kalderon
3515cee4d264SManish Chopra /* Re-open incoming traffic */
351615582962SRahul Verma qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
351715582962SRahul Verma qed_ptt_release(p_hwfn, p_ptt);
351815582962SRahul Verma
351915582962SRahul Verma return 0;
3520cee4d264SManish Chopra }
3521cee4d264SManish Chopra
3522fe56b9e6SYuval Mintz /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
qed_hw_hwfn_free(struct qed_hwfn * p_hwfn)3523fe56b9e6SYuval Mintz static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
3524fe56b9e6SYuval Mintz {
3525fe56b9e6SYuval Mintz qed_ptt_pool_free(p_hwfn);
3526fe56b9e6SYuval Mintz kfree(p_hwfn->hw_info.p_igu_info);
35273587cb87STomer Tayar p_hwfn->hw_info.p_igu_info = NULL;
3528fe56b9e6SYuval Mintz }
3529fe56b9e6SYuval Mintz
3530fe56b9e6SYuval Mintz /* Setup bar access */
qed_hw_hwfn_prepare(struct qed_hwfn * p_hwfn)353112e09c69SYuval Mintz static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
3532fe56b9e6SYuval Mintz {
3533fe56b9e6SYuval Mintz /* clear indirect access */
35349c79ddaaSMintz, Yuval if (QED_IS_AH(p_hwfn->cdev)) {
35359c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35369c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
35379c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35389c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
35399c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35409c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
35419c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35429c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
35439c79ddaaSMintz, Yuval } else {
35449c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35459c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
35469c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35479c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
35489c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35499c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
35509c79ddaaSMintz, Yuval qed_wr(p_hwfn, p_hwfn->p_main_ptt,
35519c79ddaaSMintz, Yuval PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
35529c79ddaaSMintz, Yuval }
3553fe56b9e6SYuval Mintz
3554666db486STomer Tayar /* Clean previous pglue_b errors if such exist */
3555666db486STomer Tayar qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
3556fe56b9e6SYuval Mintz
3557fe56b9e6SYuval Mintz /* enable internal target-read */
3558fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_hwfn->p_main_ptt,
3559fe56b9e6SYuval Mintz PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
3560fe56b9e6SYuval Mintz }
3561fe56b9e6SYuval Mintz
get_function_id(struct qed_hwfn * p_hwfn)3562fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
3563fe56b9e6SYuval Mintz {
3564fe56b9e6SYuval Mintz /* ME Register */
35651a635e48SYuval Mintz p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
35661a635e48SYuval Mintz PXP_PF_ME_OPAQUE_ADDR);
3567fe56b9e6SYuval Mintz
3568fe56b9e6SYuval Mintz p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
3569fe56b9e6SYuval Mintz
3570fe56b9e6SYuval Mintz p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
3571fe56b9e6SYuval Mintz p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
3572fe56b9e6SYuval Mintz PXP_CONCRETE_FID_PFID);
3573fe56b9e6SYuval Mintz p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
3574fe56b9e6SYuval Mintz PXP_CONCRETE_FID_PORT);
3575525ef5c0SYuval Mintz
3576525ef5c0SYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
3577525ef5c0SYuval Mintz "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
3578525ef5c0SYuval Mintz p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
3579fe56b9e6SYuval Mintz }
3580fe56b9e6SYuval Mintz
/* Distribute the hwfn's status blocks and L2-queue/CQ resources among the
 * features relevant to its personality (L2, RoCE CNQs, FCoE/iSCSI/NVMeTCP
 * command queues), filling p_hwfn->hw_info.feat_num.
 */
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	struct qed_sb_cnt_info sb_cnt;
	u32 non_l2_sbs = 0;

	memset(&sb_cnt, 0, sizeof(sb_cnt));
	qed_int_get_num_sbs(p_hwfn, &sb_cnt);

	if (IS_ENABLED(CONFIG_QED_RDMA) &&
	    QED_IS_RDMA_PERSONALITY(p_hwfn)) {
		/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
		 * the status blocks equally between L2 / RoCE but with
		 * consideration as to how many l2 queues / cnqs we have.
		 */
		feat_num[QED_RDMA_CNQ] =
			min_t(u32, sb_cnt.cnt / 2,
			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));

		/* SBs taken by RDMA are no longer available to L2 below */
		non_l2_sbs = feat_num[QED_RDMA_CNQ];
	}
	if (QED_IS_L2_PERSONALITY(p_hwfn)) {
		/* Start by allocating VF queues, then PF's */
		feat_num[QED_VF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_L2_QUEUE),
						sb_cnt.iov_cnt);
		/* PF L2 queues are bounded both by the remaining SBs and by
		 * the L2-queue resources left after the VF allocation.
		 */
		feat_num[QED_PF_L2_QUE] = min_t(u32,
						sb_cnt.cnt - non_l2_sbs,
						RESC_NUM(p_hwfn,
							 QED_L2_QUEUE) -
						FEAT_NUM(p_hwfn,
							 QED_VF_L2_QUE));
	}

	/* Storage personalities: CQ count is SB-bound and CMDQ/CQS-bound */
	if (QED_IS_FCOE_PERSONALITY(p_hwfn))
		feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
					      RESC_NUM(p_hwfn,
						       QED_CMDQS_CQS));

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
					       RESC_NUM(p_hwfn,
							QED_CMDQS_CQS));

	if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt,
						 RESC_NUM(p_hwfn,
							  QED_CMDQS_CQS));

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n",
		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
		   (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
		   (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
		   (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ),
		   (int)sb_cnt.cnt);
}
364125c089d7SYuval Mintz
qed_hw_get_resc_name(enum qed_resources res_id)36429c8517c4STomer Tayar const char *qed_hw_get_resc_name(enum qed_resources res_id)
36432edbff8dSTomer Tayar {
36442edbff8dSTomer Tayar switch (res_id) {
36452edbff8dSTomer Tayar case QED_L2_QUEUE:
36462edbff8dSTomer Tayar return "L2_QUEUE";
36472edbff8dSTomer Tayar case QED_VPORT:
36482edbff8dSTomer Tayar return "VPORT";
36492edbff8dSTomer Tayar case QED_RSS_ENG:
36502edbff8dSTomer Tayar return "RSS_ENG";
36512edbff8dSTomer Tayar case QED_PQ:
36522edbff8dSTomer Tayar return "PQ";
36532edbff8dSTomer Tayar case QED_RL:
36542edbff8dSTomer Tayar return "RL";
36552edbff8dSTomer Tayar case QED_MAC:
36562edbff8dSTomer Tayar return "MAC";
36572edbff8dSTomer Tayar case QED_VLAN:
36582edbff8dSTomer Tayar return "VLAN";
36592edbff8dSTomer Tayar case QED_RDMA_CNQ_RAM:
36602edbff8dSTomer Tayar return "RDMA_CNQ_RAM";
36612edbff8dSTomer Tayar case QED_ILT:
36622edbff8dSTomer Tayar return "ILT";
3663997af5dfSMichal Kalderon case QED_LL2_RAM_QUEUE:
3664997af5dfSMichal Kalderon return "LL2_RAM_QUEUE";
3665997af5dfSMichal Kalderon case QED_LL2_CTX_QUEUE:
3666997af5dfSMichal Kalderon return "LL2_CTX_QUEUE";
36672edbff8dSTomer Tayar case QED_CMDQS_CQS:
36682edbff8dSTomer Tayar return "CMDQS_CQS";
36692edbff8dSTomer Tayar case QED_RDMA_STATS_QUEUE:
36702edbff8dSTomer Tayar return "RDMA_STATS_QUEUE";
36719c8517c4STomer Tayar case QED_BDQ:
36729c8517c4STomer Tayar return "BDQ";
36739c8517c4STomer Tayar case QED_SB:
36749c8517c4STomer Tayar return "SB";
36752edbff8dSTomer Tayar default:
36762edbff8dSTomer Tayar return "UNKNOWN_RESOURCE";
36772edbff8dSTomer Tayar }
36782edbff8dSTomer Tayar }
36792edbff8dSTomer Tayar
36809c8517c4STomer Tayar static int
__qed_hw_set_soft_resc_size(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,enum qed_resources res_id,u32 resc_max_val,u32 * p_mcp_resp)36819c8517c4STomer Tayar __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
36829c8517c4STomer Tayar struct qed_ptt *p_ptt,
36839c8517c4STomer Tayar enum qed_resources res_id,
36849c8517c4STomer Tayar u32 resc_max_val, u32 *p_mcp_resp)
36859c8517c4STomer Tayar {
36869c8517c4STomer Tayar int rc;
36879c8517c4STomer Tayar
36889c8517c4STomer Tayar rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
36899c8517c4STomer Tayar resc_max_val, p_mcp_resp);
36909c8517c4STomer Tayar if (rc) {
36919c8517c4STomer Tayar DP_NOTICE(p_hwfn,
36929c8517c4STomer Tayar "MFW response failure for a max value setting of resource %d [%s]\n",
36939c8517c4STomer Tayar res_id, qed_hw_get_resc_name(res_id));
36949c8517c4STomer Tayar return rc;
36959c8517c4STomer Tayar }
36969c8517c4STomer Tayar
36979c8517c4STomer Tayar if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
36989c8517c4STomer Tayar DP_INFO(p_hwfn,
36999c8517c4STomer Tayar "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
37009c8517c4STomer Tayar res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
37019c8517c4STomer Tayar
37029c8517c4STomer Tayar return 0;
37039c8517c4STomer Tayar }
37049c8517c4STomer Tayar
/* Per-chip HSI default limits.  Rows follow the order of
 * enum qed_hsi_def_type; columns are indexed by chip ID
 * ({CHIP_BB, CHIP_K2} - MAX_CHIP_IDS wide).  Read only through
 * qed_get_hsi_def_val() below, which bounds-checks the type.
 */
static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
	{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
	{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
	{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,},
	{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
	{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
	{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
	{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
	{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
	{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
	/* The following limits are chip-independent */
	{MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
	{PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
	{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
};
37201392d19fSMichal Kalderon
qed_get_hsi_def_val(struct qed_dev * cdev,enum qed_hsi_def_type type)37211392d19fSMichal Kalderon u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
37221392d19fSMichal Kalderon {
37231392d19fSMichal Kalderon enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
37241392d19fSMichal Kalderon
37251392d19fSMichal Kalderon if (type >= QED_NUM_HSI_DEFS) {
37261392d19fSMichal Kalderon DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
37271392d19fSMichal Kalderon return 0;
37281392d19fSMichal Kalderon }
37291392d19fSMichal Kalderon
37301392d19fSMichal Kalderon return qed_hsi_def_val[type][chip_id];
37311392d19fSMichal Kalderon }
3732fe40a830SPrabhakar Kushwaha
37339c8517c4STomer Tayar static int
qed_hw_set_soft_resc_size(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)37349c8517c4STomer Tayar qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
37359c8517c4STomer Tayar {
37369c8517c4STomer Tayar u32 resc_max_val, mcp_resp;
37379c8517c4STomer Tayar u8 res_id;
37389c8517c4STomer Tayar int rc;
3739fe40a830SPrabhakar Kushwaha
37409c8517c4STomer Tayar for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
37419c8517c4STomer Tayar switch (res_id) {
3742997af5dfSMichal Kalderon case QED_LL2_RAM_QUEUE:
3743997af5dfSMichal Kalderon resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
3744997af5dfSMichal Kalderon break;
3745997af5dfSMichal Kalderon case QED_LL2_CTX_QUEUE:
3746997af5dfSMichal Kalderon resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
37479c8517c4STomer Tayar break;
37489c8517c4STomer Tayar case QED_RDMA_CNQ_RAM:
37499c8517c4STomer Tayar /* No need for a case for QED_CMDQS_CQS since
37509c8517c4STomer Tayar * CNQ/CMDQS are the same resource.
37519c8517c4STomer Tayar */
3752da090917STomer Tayar resc_max_val = NUM_OF_GLOBAL_QUEUES;
37539c8517c4STomer Tayar break;
37549c8517c4STomer Tayar case QED_RDMA_STATS_QUEUE:
37551392d19fSMichal Kalderon resc_max_val =
37561392d19fSMichal Kalderon NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
37579c8517c4STomer Tayar break;
37589c8517c4STomer Tayar case QED_BDQ:
37599c8517c4STomer Tayar resc_max_val = BDQ_NUM_RESOURCES;
37609c8517c4STomer Tayar break;
37619c8517c4STomer Tayar default:
37629c8517c4STomer Tayar continue;
37639c8517c4STomer Tayar }
37649c8517c4STomer Tayar
37659c8517c4STomer Tayar rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
37669c8517c4STomer Tayar resc_max_val, &mcp_resp);
37679c8517c4STomer Tayar if (rc)
37689c8517c4STomer Tayar return rc;
37699c8517c4STomer Tayar
37709c8517c4STomer Tayar /* There's no point to continue to the next resource if the
37719c8517c4STomer Tayar * command is not supported by the MFW.
37729c8517c4STomer Tayar * We do continue if the command is supported but the resource
37739c8517c4STomer Tayar * is unknown to the MFW. Such a resource will be later
37749c8517c4STomer Tayar * configured with the default allocation values.
37759c8517c4STomer Tayar */
37769c8517c4STomer Tayar if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
37779c8517c4STomer Tayar return -EINVAL;
37789c8517c4STomer Tayar }
37799c8517c4STomer Tayar
37809c8517c4STomer Tayar return 0;
37819c8517c4STomer Tayar }
37829c8517c4STomer Tayar
37839c8517c4STomer Tayar static
qed_hw_get_dflt_resc(struct qed_hwfn * p_hwfn,enum qed_resources res_id,u32 * p_resc_num,u32 * p_resc_start)37849c8517c4STomer Tayar int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
37859c8517c4STomer Tayar enum qed_resources res_id,
37869c8517c4STomer Tayar u32 *p_resc_num, u32 *p_resc_start)
37879c8517c4STomer Tayar {
37889c8517c4STomer Tayar u8 num_funcs = p_hwfn->num_funcs_on_engine;
37891392d19fSMichal Kalderon struct qed_dev *cdev = p_hwfn->cdev;
37909c8517c4STomer Tayar
37919c8517c4STomer Tayar switch (res_id) {
37929c8517c4STomer Tayar case QED_L2_QUEUE:
37931392d19fSMichal Kalderon *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
37949c8517c4STomer Tayar break;
37959c8517c4STomer Tayar case QED_VPORT:
37961392d19fSMichal Kalderon *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
37979c8517c4STomer Tayar break;
37989c8517c4STomer Tayar case QED_RSS_ENG:
37991392d19fSMichal Kalderon *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
38009c8517c4STomer Tayar break;
38019c8517c4STomer Tayar case QED_PQ:
38021392d19fSMichal Kalderon *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
38039c8517c4STomer Tayar *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
38049c8517c4STomer Tayar break;
38059c8517c4STomer Tayar case QED_RL:
38061392d19fSMichal Kalderon *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
38079c8517c4STomer Tayar break;
38089c8517c4STomer Tayar case QED_MAC:
38099c8517c4STomer Tayar case QED_VLAN:
38109c8517c4STomer Tayar /* Each VFC resource can accommodate both a MAC and a VLAN */
38119c8517c4STomer Tayar *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
38129c8517c4STomer Tayar break;
38139c8517c4STomer Tayar case QED_ILT:
38141392d19fSMichal Kalderon *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
38159c8517c4STomer Tayar break;
3816997af5dfSMichal Kalderon case QED_LL2_RAM_QUEUE:
3817997af5dfSMichal Kalderon *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
3818997af5dfSMichal Kalderon break;
3819997af5dfSMichal Kalderon case QED_LL2_CTX_QUEUE:
3820997af5dfSMichal Kalderon *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
38219c8517c4STomer Tayar break;
38229c8517c4STomer Tayar case QED_RDMA_CNQ_RAM:
38239c8517c4STomer Tayar case QED_CMDQS_CQS:
38249c8517c4STomer Tayar /* CNQ/CMDQS are the same resource */
3825da090917STomer Tayar *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
38269c8517c4STomer Tayar break;
38279c8517c4STomer Tayar case QED_RDMA_STATS_QUEUE:
38281392d19fSMichal Kalderon *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
38299c8517c4STomer Tayar break;
38309c8517c4STomer Tayar case QED_BDQ:
38319c8517c4STomer Tayar if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
3832897e87a1SShai Malin p_hwfn->hw_info.personality != QED_PCI_FCOE &&
3833897e87a1SShai Malin p_hwfn->hw_info.personality != QED_PCI_NVMETCP)
38349c8517c4STomer Tayar *p_resc_num = 0;
38359c8517c4STomer Tayar else
38369c8517c4STomer Tayar *p_resc_num = 1;
38379c8517c4STomer Tayar break;
38389c8517c4STomer Tayar case QED_SB:
3839ebbdcc66SMintz, Yuval /* Since we want its value to reflect whether MFW supports
3840ebbdcc66SMintz, Yuval * the new scheme, have a default of 0.
3841ebbdcc66SMintz, Yuval */
3842ebbdcc66SMintz, Yuval *p_resc_num = 0;
38439c8517c4STomer Tayar break;
38449c8517c4STomer Tayar default:
38459c8517c4STomer Tayar return -EINVAL;
38469c8517c4STomer Tayar }
38479c8517c4STomer Tayar
38489c8517c4STomer Tayar switch (res_id) {
38499c8517c4STomer Tayar case QED_BDQ:
38509c8517c4STomer Tayar if (!*p_resc_num)
38519c8517c4STomer Tayar *p_resc_start = 0;
385278cea9ffSTomer Tayar else if (p_hwfn->cdev->num_ports_in_engine == 4)
38539c8517c4STomer Tayar *p_resc_start = p_hwfn->port_id;
3854897e87a1SShai Malin else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
3855897e87a1SShai Malin p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
38569c8517c4STomer Tayar *p_resc_start = p_hwfn->port_id;
38579c8517c4STomer Tayar else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
38589c8517c4STomer Tayar *p_resc_start = p_hwfn->port_id + 2;
38599c8517c4STomer Tayar break;
38609c8517c4STomer Tayar default:
38619c8517c4STomer Tayar *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
38629c8517c4STomer Tayar break;
38639c8517c4STomer Tayar }
38649c8517c4STomer Tayar
38659c8517c4STomer Tayar return 0;
38669c8517c4STomer Tayar }
38679c8517c4STomer Tayar
__qed_hw_set_resc_info(struct qed_hwfn * p_hwfn,enum qed_resources res_id)38689c8517c4STomer Tayar static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
38692edbff8dSTomer Tayar enum qed_resources res_id)
38702edbff8dSTomer Tayar {
38719c8517c4STomer Tayar u32 dflt_resc_num = 0, dflt_resc_start = 0;
38729c8517c4STomer Tayar u32 mcp_resp, *p_resc_num, *p_resc_start;
38732edbff8dSTomer Tayar int rc;
38742edbff8dSTomer Tayar
38752edbff8dSTomer Tayar p_resc_num = &RESC_NUM(p_hwfn, res_id);
38762edbff8dSTomer Tayar p_resc_start = &RESC_START(p_hwfn, res_id);
38772edbff8dSTomer Tayar
38789c8517c4STomer Tayar rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
38799c8517c4STomer Tayar &dflt_resc_start);
38809c8517c4STomer Tayar if (rc) {
38812edbff8dSTomer Tayar DP_ERR(p_hwfn,
38822edbff8dSTomer Tayar "Failed to get default amount for resource %d [%s]\n",
38832edbff8dSTomer Tayar res_id, qed_hw_get_resc_name(res_id));
38849c8517c4STomer Tayar return rc;
38852edbff8dSTomer Tayar }
38862edbff8dSTomer Tayar
38879c8517c4STomer Tayar rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
38889c8517c4STomer Tayar &mcp_resp, p_resc_num, p_resc_start);
38892edbff8dSTomer Tayar if (rc) {
38902edbff8dSTomer Tayar DP_NOTICE(p_hwfn,
38912edbff8dSTomer Tayar "MFW response failure for an allocation request for resource %d [%s]\n",
38922edbff8dSTomer Tayar res_id, qed_hw_get_resc_name(res_id));
38932edbff8dSTomer Tayar return rc;
38942edbff8dSTomer Tayar }
38952edbff8dSTomer Tayar
38962edbff8dSTomer Tayar /* Default driver values are applied in the following cases:
38972edbff8dSTomer Tayar * - The resource allocation MB command is not supported by the MFW
38982edbff8dSTomer Tayar * - There is an internal error in the MFW while processing the request
38992edbff8dSTomer Tayar * - The resource ID is unknown to the MFW
39002edbff8dSTomer Tayar */
39019c8517c4STomer Tayar if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
39029c8517c4STomer Tayar DP_INFO(p_hwfn,
39039c8517c4STomer Tayar "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
39042edbff8dSTomer Tayar res_id,
39052edbff8dSTomer Tayar qed_hw_get_resc_name(res_id),
39062edbff8dSTomer Tayar mcp_resp, dflt_resc_num, dflt_resc_start);
39072edbff8dSTomer Tayar *p_resc_num = dflt_resc_num;
39082edbff8dSTomer Tayar *p_resc_start = dflt_resc_start;
39092edbff8dSTomer Tayar goto out;
39102edbff8dSTomer Tayar }
39112edbff8dSTomer Tayar
39122edbff8dSTomer Tayar out:
39132edbff8dSTomer Tayar /* PQs have to divide by 8 [that's the HW granularity].
39142edbff8dSTomer Tayar * Reduce number so it would fit.
39152edbff8dSTomer Tayar */
39162edbff8dSTomer Tayar if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
39172edbff8dSTomer Tayar DP_INFO(p_hwfn,
39182edbff8dSTomer Tayar "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
39192edbff8dSTomer Tayar *p_resc_num,
39202edbff8dSTomer Tayar (*p_resc_num) & ~0x7,
39212edbff8dSTomer Tayar *p_resc_start, (*p_resc_start) & ~0x7);
39222edbff8dSTomer Tayar *p_resc_num &= ~0x7;
39232edbff8dSTomer Tayar *p_resc_start &= ~0x7;
39242edbff8dSTomer Tayar }
39252edbff8dSTomer Tayar
39262edbff8dSTomer Tayar return 0;
39272edbff8dSTomer Tayar }
39282edbff8dSTomer Tayar
qed_hw_set_resc_info(struct qed_hwfn * p_hwfn)39299c8517c4STomer Tayar static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
3930fe56b9e6SYuval Mintz {
39319c8517c4STomer Tayar int rc;
39329c8517c4STomer Tayar u8 res_id;
39339c8517c4STomer Tayar
39349c8517c4STomer Tayar for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
39359c8517c4STomer Tayar rc = __qed_hw_set_resc_info(p_hwfn, res_id);
39369c8517c4STomer Tayar if (rc)
39379c8517c4STomer Tayar return rc;
39389c8517c4STomer Tayar }
39399c8517c4STomer Tayar
39409c8517c4STomer Tayar return 0;
39419c8517c4STomer Tayar }
39429c8517c4STomer Tayar
qed_hw_get_ppfid_bitmap(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)394379284adeSMichal Kalderon static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn,
394479284adeSMichal Kalderon struct qed_ptt *p_ptt)
394579284adeSMichal Kalderon {
394679284adeSMichal Kalderon struct qed_dev *cdev = p_hwfn->cdev;
394779284adeSMichal Kalderon u8 native_ppfid_idx;
394879284adeSMichal Kalderon int rc;
394979284adeSMichal Kalderon
395079284adeSMichal Kalderon /* Calculation of BB/AH is different for native_ppfid_idx */
395179284adeSMichal Kalderon if (QED_IS_BB(cdev))
395279284adeSMichal Kalderon native_ppfid_idx = p_hwfn->rel_pf_id;
395379284adeSMichal Kalderon else
395479284adeSMichal Kalderon native_ppfid_idx = p_hwfn->rel_pf_id /
395579284adeSMichal Kalderon cdev->num_ports_in_engine;
395679284adeSMichal Kalderon
395779284adeSMichal Kalderon rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
395879284adeSMichal Kalderon if (rc != 0 && rc != -EOPNOTSUPP)
395979284adeSMichal Kalderon return rc;
396079284adeSMichal Kalderon else if (rc == -EOPNOTSUPP)
396179284adeSMichal Kalderon cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
396279284adeSMichal Kalderon
396379284adeSMichal Kalderon if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
396479284adeSMichal Kalderon DP_INFO(p_hwfn,
39651b3855abSColin Ian King "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
396679284adeSMichal Kalderon native_ppfid_idx, cdev->ppfid_bitmap);
396779284adeSMichal Kalderon cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
396879284adeSMichal Kalderon }
396979284adeSMichal Kalderon
397079284adeSMichal Kalderon return 0;
397179284adeSMichal Kalderon }
397279284adeSMichal Kalderon
/* Top-level resource acquisition for a hw-function: negotiate soft
 * resource max values with the MFW under the resource-allocation lock,
 * resolve per-resource allocations, fetch the PPFID bitmap (lead hwfn
 * only), sanity-check the ILT range, and reset the IGU CAM (which also
 * learns the SB count from the MFW).  Finally derives the feature
 * counts via qed_hw_set_feat().
 */
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params resc_unlock_params;
	struct qed_resc_lock_params resc_lock_params;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u8 res_id;
	int rc;

	/* Setting the max values of the soft resources and the following
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		/* -EINVAL == lock command unsupported; proceed unlocked */
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (!resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return -EBUSY;
	} else {
		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc && rc != -EINVAL) {
			DP_NOTICE(p_hwfn,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == -EINVAL) {
			/* Max-value command unsupported; release the lock
			 * now and fall through to the allocation queries.
			 */
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
						 &resc_unlock_params);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
		}
	}

	rc = qed_hw_set_resc_info(p_hwfn);
	if (rc)
		goto unlock_and_exit;

	/* Release the lock unless it was already released above */
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
		if (rc)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
	}

	/* PPFID bitmap */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	/* This will also learn the number of SBs from MFW */
	if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
		return -EINVAL;

	qed_hw_set_feat(p_hwfn);

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
			   qed_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return 0;

unlock_and_exit:
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
	return rc;
}
4065fe56b9e6SYuval Mintz
qed_hw_get_nvm_info(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)40661a635e48SYuval Mintz static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4067fe56b9e6SYuval Mintz {
406899785a87SAlexander Lobakin u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld;
40691e128c81SArun Easi u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
407099785a87SAlexander Lobakin struct qed_mcp_link_speed_params *ext_speed;
4071645874e5SSudarsana Reddy Kalluru struct qed_mcp_link_capabilities *p_caps;
4072cc875c2eSYuval Mintz struct qed_mcp_link_params *link;
407353916a67SIgor Russkikh int i;
4074fe56b9e6SYuval Mintz
4075fe56b9e6SYuval Mintz /* Read global nvm_cfg address */
4076fe56b9e6SYuval Mintz nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
4077fe56b9e6SYuval Mintz
4078fe56b9e6SYuval Mintz /* Verify MCP has initialized it */
4079fe56b9e6SYuval Mintz if (!nvm_cfg_addr) {
4080fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
4081fe56b9e6SYuval Mintz return -EINVAL;
4082fe56b9e6SYuval Mintz }
4083fe56b9e6SYuval Mintz
4084fe56b9e6SYuval Mintz /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
4085fe56b9e6SYuval Mintz nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
4086fe56b9e6SYuval Mintz
4087cc875c2eSYuval Mintz addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
4088cc875c2eSYuval Mintz offsetof(struct nvm_cfg1, glob) +
4089cc875c2eSYuval Mintz offsetof(struct nvm_cfg1_glob, core_cfg);
4090cc875c2eSYuval Mintz
4091cc875c2eSYuval Mintz core_cfg = qed_rd(p_hwfn, p_ptt, addr);
4092cc875c2eSYuval Mintz
4093cc875c2eSYuval Mintz switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
4094cc875c2eSYuval Mintz NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
4095351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
4096351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
4097351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
4098351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
4099351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
4100351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
4101351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
4102351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
41039c79ddaaSMintz, Yuval case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
4104351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
41059c79ddaaSMintz, Yuval case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
4106a396818cSAlexander Lobakin case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1:
4107a396818cSAlexander Lobakin case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1:
4108a396818cSAlexander Lobakin case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2:
4109a396818cSAlexander Lobakin case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2:
4110a396818cSAlexander Lobakin case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4:
41119c79ddaaSMintz, Yuval break;
4112cc875c2eSYuval Mintz default:
41131a635e48SYuval Mintz DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
4114cc875c2eSYuval Mintz break;
4115cc875c2eSYuval Mintz }
4116cc875c2eSYuval Mintz
4117cc875c2eSYuval Mintz /* Read default link configuration */
4118cc875c2eSYuval Mintz link = &p_hwfn->mcp_info->link_input;
4119645874e5SSudarsana Reddy Kalluru p_caps = &p_hwfn->mcp_info->link_capabilities;
4120cc875c2eSYuval Mintz port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
4121cc875c2eSYuval Mintz offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
4122cc875c2eSYuval Mintz link_temp = qed_rd(p_hwfn, p_ptt,
4123cc875c2eSYuval Mintz port_cfg_addr +
4124cc875c2eSYuval Mintz offsetof(struct nvm_cfg1_port, speed_cap_mask));
412583aeb933SYuval Mintz link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
412683aeb933SYuval Mintz link->speed.advertised_speeds = link_temp;
4127cc875c2eSYuval Mintz
412899785a87SAlexander Lobakin p_caps->speed_capabilities = link->speed.advertised_speeds;
4129cc875c2eSYuval Mintz
4130cc875c2eSYuval Mintz link_temp = qed_rd(p_hwfn, p_ptt,
4131cc875c2eSYuval Mintz port_cfg_addr +
4132cc875c2eSYuval Mintz offsetof(struct nvm_cfg1_port, link_settings));
4133cc875c2eSYuval Mintz switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
4134cc875c2eSYuval Mintz NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
4135cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
4136cc875c2eSYuval Mintz link->speed.autoneg = true;
4137cc875c2eSYuval Mintz break;
4138cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
4139cc875c2eSYuval Mintz link->speed.forced_speed = 1000;
4140cc875c2eSYuval Mintz break;
4141cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
4142cc875c2eSYuval Mintz link->speed.forced_speed = 10000;
4143cc875c2eSYuval Mintz break;
41445bf0961cSSudarsana Reddy Kalluru case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
41455bf0961cSSudarsana Reddy Kalluru link->speed.forced_speed = 20000;
41465bf0961cSSudarsana Reddy Kalluru break;
4147cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
4148cc875c2eSYuval Mintz link->speed.forced_speed = 25000;
4149cc875c2eSYuval Mintz break;
4150cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
4151cc875c2eSYuval Mintz link->speed.forced_speed = 40000;
4152cc875c2eSYuval Mintz break;
4153cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
4154cc875c2eSYuval Mintz link->speed.forced_speed = 50000;
4155cc875c2eSYuval Mintz break;
4156351a4dedSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
4157cc875c2eSYuval Mintz link->speed.forced_speed = 100000;
4158cc875c2eSYuval Mintz break;
4159cc875c2eSYuval Mintz default:
41601a635e48SYuval Mintz DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
4161cc875c2eSYuval Mintz }
4162cc875c2eSYuval Mintz
416399785a87SAlexander Lobakin p_caps->default_speed_autoneg = link->speed.autoneg;
416434f9199cSsudarsana.kalluru@cavium.com
416599785a87SAlexander Lobakin fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL);
416699785a87SAlexander Lobakin link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
416799785a87SAlexander Lobakin link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
416899785a87SAlexander Lobakin link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
4169cc875c2eSYuval Mintz link->loopback_mode = 0;
4170cc875c2eSYuval Mintz
4171ae7e6937SAlexander Lobakin if (p_hwfn->mcp_info->capabilities &
4172ae7e6937SAlexander Lobakin FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
4173ae7e6937SAlexander Lobakin switch (GET_MFW_FIELD(link_temp,
4174ae7e6937SAlexander Lobakin NVM_CFG1_PORT_FEC_FORCE_MODE)) {
4175ae7e6937SAlexander Lobakin case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE:
4176ae7e6937SAlexander Lobakin p_caps->fec_default |= QED_FEC_MODE_NONE;
4177ae7e6937SAlexander Lobakin break;
4178ae7e6937SAlexander Lobakin case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE:
4179ae7e6937SAlexander Lobakin p_caps->fec_default |= QED_FEC_MODE_FIRECODE;
4180ae7e6937SAlexander Lobakin break;
4181ae7e6937SAlexander Lobakin case NVM_CFG1_PORT_FEC_FORCE_MODE_RS:
4182ae7e6937SAlexander Lobakin p_caps->fec_default |= QED_FEC_MODE_RS;
4183ae7e6937SAlexander Lobakin break;
4184ae7e6937SAlexander Lobakin case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO:
4185ae7e6937SAlexander Lobakin p_caps->fec_default |= QED_FEC_MODE_AUTO;
4186ae7e6937SAlexander Lobakin break;
4187ae7e6937SAlexander Lobakin default:
4188ae7e6937SAlexander Lobakin DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4189ae7e6937SAlexander Lobakin "unknown FEC mode in 0x%08x\n", link_temp);
4190ae7e6937SAlexander Lobakin }
4191ae7e6937SAlexander Lobakin } else {
4192ae7e6937SAlexander Lobakin p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED;
4193ae7e6937SAlexander Lobakin }
4194ae7e6937SAlexander Lobakin
4195ae7e6937SAlexander Lobakin link->fec = p_caps->fec_default;
4196ae7e6937SAlexander Lobakin
4197645874e5SSudarsana Reddy Kalluru if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
4198645874e5SSudarsana Reddy Kalluru link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
4199645874e5SSudarsana Reddy Kalluru offsetof(struct nvm_cfg1_port, ext_phy));
4200645874e5SSudarsana Reddy Kalluru link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
4201645874e5SSudarsana Reddy Kalluru link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
4202645874e5SSudarsana Reddy Kalluru p_caps->default_eee = QED_MCP_EEE_ENABLED;
4203645874e5SSudarsana Reddy Kalluru link->eee.enable = true;
4204645874e5SSudarsana Reddy Kalluru switch (link_temp) {
4205645874e5SSudarsana Reddy Kalluru case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
4206645874e5SSudarsana Reddy Kalluru p_caps->default_eee = QED_MCP_EEE_DISABLED;
4207645874e5SSudarsana Reddy Kalluru link->eee.enable = false;
4208645874e5SSudarsana Reddy Kalluru break;
4209645874e5SSudarsana Reddy Kalluru case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
4210645874e5SSudarsana Reddy Kalluru p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
4211645874e5SSudarsana Reddy Kalluru break;
4212645874e5SSudarsana Reddy Kalluru case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
4213645874e5SSudarsana Reddy Kalluru p_caps->eee_lpi_timer =
4214645874e5SSudarsana Reddy Kalluru EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
4215645874e5SSudarsana Reddy Kalluru break;
4216645874e5SSudarsana Reddy Kalluru case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
4217645874e5SSudarsana Reddy Kalluru p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
4218645874e5SSudarsana Reddy Kalluru break;
4219645874e5SSudarsana Reddy Kalluru }
4220645874e5SSudarsana Reddy Kalluru
4221645874e5SSudarsana Reddy Kalluru link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
4222645874e5SSudarsana Reddy Kalluru link->eee.tx_lpi_enable = link->eee.enable;
4223645874e5SSudarsana Reddy Kalluru link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
4224645874e5SSudarsana Reddy Kalluru } else {
4225645874e5SSudarsana Reddy Kalluru p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
4226645874e5SSudarsana Reddy Kalluru }
4227645874e5SSudarsana Reddy Kalluru
422899785a87SAlexander Lobakin if (p_hwfn->mcp_info->capabilities &
422999785a87SAlexander Lobakin FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
423099785a87SAlexander Lobakin ext_speed = &link->ext_speed;
423199785a87SAlexander Lobakin
423299785a87SAlexander Lobakin link_temp = qed_rd(p_hwfn, p_ptt,
423399785a87SAlexander Lobakin port_cfg_addr +
423499785a87SAlexander Lobakin offsetof(struct nvm_cfg1_port,
423599785a87SAlexander Lobakin extended_speed));
423699785a87SAlexander Lobakin
423799785a87SAlexander Lobakin fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED);
423899785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN)
423999785a87SAlexander Lobakin ext_speed->autoneg = true;
424099785a87SAlexander Lobakin
424199785a87SAlexander Lobakin ext_speed->forced_speed = 0;
424299785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G)
424399785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_1G;
424499785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G)
424599785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_10G;
424699785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G)
424799785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_20G;
424899785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G)
424999785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_25G;
425099785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G)
425199785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_40G;
425299785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R)
425399785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_50G_R;
425499785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2)
425599785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2;
425699785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2)
425799785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2;
425899785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4)
425999785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4;
426099785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4)
426199785a87SAlexander Lobakin ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4;
426299785a87SAlexander Lobakin
426399785a87SAlexander Lobakin fld = GET_MFW_FIELD(link_temp,
426499785a87SAlexander Lobakin NVM_CFG1_PORT_EXTENDED_SPEED_CAP);
426599785a87SAlexander Lobakin
426699785a87SAlexander Lobakin ext_speed->advertised_speeds = 0;
426799785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED)
426899785a87SAlexander Lobakin ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES;
426999785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G)
427099785a87SAlexander Lobakin ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G;
427199785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G)
427299785a87SAlexander Lobakin ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G;
427399785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G)
427499785a87SAlexander Lobakin ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G;
427599785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G)
427699785a87SAlexander Lobakin ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G;
427799785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G)
427899785a87SAlexander Lobakin ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G;
427999785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R)
428099785a87SAlexander Lobakin ext_speed->advertised_speeds |=
428199785a87SAlexander Lobakin QED_EXT_SPEED_MASK_50G_R;
428299785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2)
428399785a87SAlexander Lobakin ext_speed->advertised_speeds |=
428499785a87SAlexander Lobakin QED_EXT_SPEED_MASK_50G_R2;
428599785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2)
428699785a87SAlexander Lobakin ext_speed->advertised_speeds |=
428799785a87SAlexander Lobakin QED_EXT_SPEED_MASK_100G_R2;
428899785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4)
428999785a87SAlexander Lobakin ext_speed->advertised_speeds |=
429099785a87SAlexander Lobakin QED_EXT_SPEED_MASK_100G_R4;
429199785a87SAlexander Lobakin if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4)
429299785a87SAlexander Lobakin ext_speed->advertised_speeds |=
429399785a87SAlexander Lobakin QED_EXT_SPEED_MASK_100G_P4;
429499785a87SAlexander Lobakin
429599785a87SAlexander Lobakin link_temp = qed_rd(p_hwfn, p_ptt,
429699785a87SAlexander Lobakin port_cfg_addr +
429799785a87SAlexander Lobakin offsetof(struct nvm_cfg1_port,
429899785a87SAlexander Lobakin extended_fec_mode));
429999785a87SAlexander Lobakin link->ext_fec_mode = link_temp;
430099785a87SAlexander Lobakin
430199785a87SAlexander Lobakin p_caps->default_ext_speed_caps = ext_speed->advertised_speeds;
430299785a87SAlexander Lobakin p_caps->default_ext_speed = ext_speed->forced_speed;
430399785a87SAlexander Lobakin p_caps->default_ext_autoneg = ext_speed->autoneg;
430499785a87SAlexander Lobakin p_caps->default_ext_fec = link->ext_fec_mode;
430599785a87SAlexander Lobakin
430699785a87SAlexander Lobakin DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
430799785a87SAlexander Lobakin "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n",
430899785a87SAlexander Lobakin ext_speed->forced_speed,
430999785a87SAlexander Lobakin ext_speed->advertised_speeds, ext_speed->autoneg,
431099785a87SAlexander Lobakin p_caps->default_ext_fec);
431199785a87SAlexander Lobakin }
431299785a87SAlexander Lobakin
4313ae7e6937SAlexander Lobakin DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4314ae7e6937SAlexander Lobakin "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n",
4315ae7e6937SAlexander Lobakin link->speed.forced_speed, link->speed.advertised_speeds,
4316ae7e6937SAlexander Lobakin link->speed.autoneg, link->pause.autoneg,
4317ae7e6937SAlexander Lobakin p_caps->default_eee, p_caps->eee_lpi_timer,
4318ae7e6937SAlexander Lobakin p_caps->fec_default);
4319cc875c2eSYuval Mintz
4320b51bdfb9SSudarsana Reddy Kalluru if (IS_LEAD_HWFN(p_hwfn)) {
4321b51bdfb9SSudarsana Reddy Kalluru struct qed_dev *cdev = p_hwfn->cdev;
4322b51bdfb9SSudarsana Reddy Kalluru
4323fe56b9e6SYuval Mintz /* Read Multi-function information from shmem */
4324fe56b9e6SYuval Mintz addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
4325fe56b9e6SYuval Mintz offsetof(struct nvm_cfg1, glob) +
4326fe56b9e6SYuval Mintz offsetof(struct nvm_cfg1_glob, generic_cont0);
4327fe56b9e6SYuval Mintz
4328fe56b9e6SYuval Mintz generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
4329fe56b9e6SYuval Mintz
4330fe56b9e6SYuval Mintz mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
4331fe56b9e6SYuval Mintz NVM_CFG1_GLOB_MF_MODE_OFFSET;
4332fe56b9e6SYuval Mintz
4333fe56b9e6SYuval Mintz switch (mf_mode) {
4334fe56b9e6SYuval Mintz case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
4335b51bdfb9SSudarsana Reddy Kalluru cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
4336b51bdfb9SSudarsana Reddy Kalluru break;
4337cac6f691SSudarsana Reddy Kalluru case NVM_CFG1_GLOB_MF_MODE_UFP:
4338cac6f691SSudarsana Reddy Kalluru cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
4339cac6f691SSudarsana Reddy Kalluru BIT(QED_MF_LLH_PROTO_CLSS) |
4340cac6f691SSudarsana Reddy Kalluru BIT(QED_MF_UFP_SPECIFIC) |
43411a3ca250SSudarsana Reddy Kalluru BIT(QED_MF_8021Q_TAGGING) |
43421a3ca250SSudarsana Reddy Kalluru BIT(QED_MF_DONT_ADD_VLAN0_TAG);
4343cac6f691SSudarsana Reddy Kalluru break;
4344b51bdfb9SSudarsana Reddy Kalluru case NVM_CFG1_GLOB_MF_MODE_BD:
4345b51bdfb9SSudarsana Reddy Kalluru cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
4346b51bdfb9SSudarsana Reddy Kalluru BIT(QED_MF_LLH_PROTO_CLSS) |
43471a3ca250SSudarsana Reddy Kalluru BIT(QED_MF_8021AD_TAGGING) |
43481a3ca250SSudarsana Reddy Kalluru BIT(QED_MF_DONT_ADD_VLAN0_TAG);
4349fe56b9e6SYuval Mintz break;
4350fe56b9e6SYuval Mintz case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
4351b51bdfb9SSudarsana Reddy Kalluru cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
43520bc5fe85SSudarsana Reddy Kalluru BIT(QED_MF_LLH_PROTO_CLSS) |
43530bc5fe85SSudarsana Reddy Kalluru BIT(QED_MF_LL2_NON_UNICAST) |
43542d2fe843SDmitry Bogdanov BIT(QED_MF_INTER_PF_SWITCH) |
43552d2fe843SDmitry Bogdanov BIT(QED_MF_DISABLE_ARFS);
4356fe56b9e6SYuval Mintz break;
4357fc48b7a6SYuval Mintz case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
4358b51bdfb9SSudarsana Reddy Kalluru cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
43590bc5fe85SSudarsana Reddy Kalluru BIT(QED_MF_LLH_PROTO_CLSS) |
43600bc5fe85SSudarsana Reddy Kalluru BIT(QED_MF_LL2_NON_UNICAST);
43610bc5fe85SSudarsana Reddy Kalluru if (QED_IS_BB(p_hwfn->cdev))
4362b51bdfb9SSudarsana Reddy Kalluru cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
4363fe56b9e6SYuval Mintz break;
4364fe56b9e6SYuval Mintz }
43650bc5fe85SSudarsana Reddy Kalluru
43660bc5fe85SSudarsana Reddy Kalluru DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
4367b51bdfb9SSudarsana Reddy Kalluru cdev->mf_bits);
43682d2fe843SDmitry Bogdanov
43692d2fe843SDmitry Bogdanov /* In CMT the PF is unknown when the GFS block processes the
43702d2fe843SDmitry Bogdanov * packet. Therefore cannot use searcher as it has a per PF
43712d2fe843SDmitry Bogdanov * database, and thus ARFS must be disabled.
43722d2fe843SDmitry Bogdanov *
43732d2fe843SDmitry Bogdanov */
43742d2fe843SDmitry Bogdanov if (QED_IS_CMT(cdev))
43752d2fe843SDmitry Bogdanov cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
4376b51bdfb9SSudarsana Reddy Kalluru }
4377b51bdfb9SSudarsana Reddy Kalluru
4378b51bdfb9SSudarsana Reddy Kalluru DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
43790bc5fe85SSudarsana Reddy Kalluru p_hwfn->cdev->mf_bits);
4380fe56b9e6SYuval Mintz
4381b51bdfb9SSudarsana Reddy Kalluru /* Read device capabilities information from shmem */
4382fc48b7a6SYuval Mintz addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
4383fc48b7a6SYuval Mintz offsetof(struct nvm_cfg1, glob) +
4384fc48b7a6SYuval Mintz offsetof(struct nvm_cfg1_glob, device_capabilities);
4385fc48b7a6SYuval Mintz
4386fc48b7a6SYuval Mintz device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
4387fc48b7a6SYuval Mintz if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
4388fc48b7a6SYuval Mintz __set_bit(QED_DEV_CAP_ETH,
4389fc48b7a6SYuval Mintz &p_hwfn->hw_info.device_capabilities);
43901e128c81SArun Easi if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
43911e128c81SArun Easi __set_bit(QED_DEV_CAP_FCOE,
43921e128c81SArun Easi &p_hwfn->hw_info.device_capabilities);
4393c5ac9319SYuval Mintz if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
4394c5ac9319SYuval Mintz __set_bit(QED_DEV_CAP_ISCSI,
4395c5ac9319SYuval Mintz &p_hwfn->hw_info.device_capabilities);
4396c5ac9319SYuval Mintz if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
4397c5ac9319SYuval Mintz __set_bit(QED_DEV_CAP_ROCE,
4398c5ac9319SYuval Mintz &p_hwfn->hw_info.device_capabilities);
4399fc48b7a6SYuval Mintz
440053916a67SIgor Russkikh /* Read device serial number information from shmem */
440153916a67SIgor Russkikh addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
440253916a67SIgor Russkikh offsetof(struct nvm_cfg1, glob) +
440353916a67SIgor Russkikh offsetof(struct nvm_cfg1_glob, serial_number);
440453916a67SIgor Russkikh
440553916a67SIgor Russkikh for (i = 0; i < 4; i++)
440653916a67SIgor Russkikh p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
440753916a67SIgor Russkikh
4408fe56b9e6SYuval Mintz return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
4409fe56b9e6SYuval Mintz }
4410fe56b9e6SYuval Mintz
qed_get_num_funcs(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)44111408cc1fSYuval Mintz static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
44121408cc1fSYuval Mintz {
4413dbb799c3SYuval Mintz u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
4414dbb799c3SYuval Mintz u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
44159c79ddaaSMintz, Yuval struct qed_dev *cdev = p_hwfn->cdev;
44161408cc1fSYuval Mintz
44179c79ddaaSMintz, Yuval num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
44181408cc1fSYuval Mintz
44191408cc1fSYuval Mintz /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
44201408cc1fSYuval Mintz * in the other bits are selected.
44211408cc1fSYuval Mintz * Bits 1-15 are for functions 1-15, respectively, and their value is
44221408cc1fSYuval Mintz * '0' only for enabled functions (function 0 always exists and
44231408cc1fSYuval Mintz * enabled).
44241408cc1fSYuval Mintz * In case of CMT, only the "even" functions are enabled, and thus the
44251408cc1fSYuval Mintz * number of functions for both hwfns is learnt from the same bits.
44261408cc1fSYuval Mintz */
44271408cc1fSYuval Mintz reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
44281408cc1fSYuval Mintz
44291408cc1fSYuval Mintz if (reg_function_hide & 0x1) {
44309c79ddaaSMintz, Yuval if (QED_IS_BB(cdev)) {
44319c79ddaaSMintz, Yuval if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
44321408cc1fSYuval Mintz num_funcs = 0;
44331408cc1fSYuval Mintz eng_mask = 0xaaaa;
44341408cc1fSYuval Mintz } else {
44351408cc1fSYuval Mintz num_funcs = 1;
44361408cc1fSYuval Mintz eng_mask = 0x5554;
44371408cc1fSYuval Mintz }
44389c79ddaaSMintz, Yuval } else {
44399c79ddaaSMintz, Yuval num_funcs = 1;
44409c79ddaaSMintz, Yuval eng_mask = 0xfffe;
44419c79ddaaSMintz, Yuval }
44421408cc1fSYuval Mintz
44431408cc1fSYuval Mintz /* Get the number of the enabled functions on the engine */
44441408cc1fSYuval Mintz tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
44451408cc1fSYuval Mintz while (tmp) {
44461408cc1fSYuval Mintz if (tmp & 0x1)
44471408cc1fSYuval Mintz num_funcs++;
44481408cc1fSYuval Mintz tmp >>= 0x1;
44491408cc1fSYuval Mintz }
4450dbb799c3SYuval Mintz
4451dbb799c3SYuval Mintz /* Get the PF index within the enabled functions */
4452dbb799c3SYuval Mintz low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
4453dbb799c3SYuval Mintz tmp = reg_function_hide & eng_mask & low_pfs_mask;
4454dbb799c3SYuval Mintz while (tmp) {
4455dbb799c3SYuval Mintz if (tmp & 0x1)
4456dbb799c3SYuval Mintz enabled_func_idx--;
4457dbb799c3SYuval Mintz tmp >>= 0x1;
4458dbb799c3SYuval Mintz }
44591408cc1fSYuval Mintz }
44601408cc1fSYuval Mintz
44611408cc1fSYuval Mintz p_hwfn->num_funcs_on_engine = num_funcs;
4462dbb799c3SYuval Mintz p_hwfn->enabled_func_idx = enabled_func_idx;
44631408cc1fSYuval Mintz
44641408cc1fSYuval Mintz DP_VERBOSE(p_hwfn,
44651408cc1fSYuval Mintz NETIF_MSG_PROBE,
4466525ef5c0SYuval Mintz "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
44671408cc1fSYuval Mintz p_hwfn->rel_pf_id,
44681408cc1fSYuval Mintz p_hwfn->abs_pf_id,
4469525ef5c0SYuval Mintz p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
44701408cc1fSYuval Mintz }
44711408cc1fSYuval Mintz
qed_hw_info_port_num(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)44729c79ddaaSMintz, Yuval static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
44739c79ddaaSMintz, Yuval {
44740ebcebbeSSudarsana Reddy Kalluru u32 addr, global_offsize, global_addr, port_mode;
44750ebcebbeSSudarsana Reddy Kalluru struct qed_dev *cdev = p_hwfn->cdev;
44760ebcebbeSSudarsana Reddy Kalluru
44770ebcebbeSSudarsana Reddy Kalluru /* In CMT there is always only one port */
44780ebcebbeSSudarsana Reddy Kalluru if (cdev->num_hwfns > 1) {
44790ebcebbeSSudarsana Reddy Kalluru cdev->num_ports_in_engine = 1;
44800ebcebbeSSudarsana Reddy Kalluru cdev->num_ports = 1;
44810ebcebbeSSudarsana Reddy Kalluru return;
44820ebcebbeSSudarsana Reddy Kalluru }
44830ebcebbeSSudarsana Reddy Kalluru
44840ebcebbeSSudarsana Reddy Kalluru /* Determine the number of ports per engine */
44850ebcebbeSSudarsana Reddy Kalluru port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE);
44860ebcebbeSSudarsana Reddy Kalluru switch (port_mode) {
44870ebcebbeSSudarsana Reddy Kalluru case 0x0:
44880ebcebbeSSudarsana Reddy Kalluru cdev->num_ports_in_engine = 1;
44890ebcebbeSSudarsana Reddy Kalluru break;
44900ebcebbeSSudarsana Reddy Kalluru case 0x1:
44910ebcebbeSSudarsana Reddy Kalluru cdev->num_ports_in_engine = 2;
44920ebcebbeSSudarsana Reddy Kalluru break;
44930ebcebbeSSudarsana Reddy Kalluru case 0x2:
44940ebcebbeSSudarsana Reddy Kalluru cdev->num_ports_in_engine = 4;
44950ebcebbeSSudarsana Reddy Kalluru break;
44960ebcebbeSSudarsana Reddy Kalluru default:
44970ebcebbeSSudarsana Reddy Kalluru DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode);
44980ebcebbeSSudarsana Reddy Kalluru cdev->num_ports_in_engine = 1; /* Default to something */
44990ebcebbeSSudarsana Reddy Kalluru break;
45000ebcebbeSSudarsana Reddy Kalluru }
45010ebcebbeSSudarsana Reddy Kalluru
45020ebcebbeSSudarsana Reddy Kalluru /* Get the total number of ports of the device */
45030ebcebbeSSudarsana Reddy Kalluru addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
45040ebcebbeSSudarsana Reddy Kalluru PUBLIC_GLOBAL);
45050ebcebbeSSudarsana Reddy Kalluru global_offsize = qed_rd(p_hwfn, p_ptt, addr);
45060ebcebbeSSudarsana Reddy Kalluru global_addr = SECTION_ADDR(global_offsize, 0);
45070ebcebbeSSudarsana Reddy Kalluru addr = global_addr + offsetof(struct public_global, max_ports);
45080ebcebbeSSudarsana Reddy Kalluru cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr);
45099c79ddaaSMintz, Yuval }
45109c79ddaaSMintz, Yuval
qed_get_eee_caps(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)4511645874e5SSudarsana Reddy Kalluru static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4512645874e5SSudarsana Reddy Kalluru {
4513645874e5SSudarsana Reddy Kalluru struct qed_mcp_link_capabilities *p_caps;
4514645874e5SSudarsana Reddy Kalluru u32 eee_status;
4515645874e5SSudarsana Reddy Kalluru
4516645874e5SSudarsana Reddy Kalluru p_caps = &p_hwfn->mcp_info->link_capabilities;
4517645874e5SSudarsana Reddy Kalluru if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED)
4518645874e5SSudarsana Reddy Kalluru return;
4519645874e5SSudarsana Reddy Kalluru
4520645874e5SSudarsana Reddy Kalluru p_caps->eee_speed_caps = 0;
4521645874e5SSudarsana Reddy Kalluru eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
4522645874e5SSudarsana Reddy Kalluru offsetof(struct public_port, eee_status));
4523645874e5SSudarsana Reddy Kalluru eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
4524645874e5SSudarsana Reddy Kalluru EEE_SUPPORTED_SPEED_OFFSET;
4525645874e5SSudarsana Reddy Kalluru
4526645874e5SSudarsana Reddy Kalluru if (eee_status & EEE_1G_SUPPORTED)
4527645874e5SSudarsana Reddy Kalluru p_caps->eee_speed_caps |= QED_EEE_1G_ADV;
4528645874e5SSudarsana Reddy Kalluru if (eee_status & EEE_10G_ADV)
4529645874e5SSudarsana Reddy Kalluru p_caps->eee_speed_caps |= QED_EEE_10G_ADV;
4530645874e5SSudarsana Reddy Kalluru }
4531645874e5SSudarsana Reddy Kalluru
/* Populate p_hwfn->hw_info from HW registers and MCP/SHMEM: SRIOV info,
 * port counts, NVM config, IGU CAM, MAC, personality, TC defaults and MTU.
 * Must run after MCP init; ordering of the calls below is significant.
 * Returns 0 on success or a negative errno from the sub-steps.
 */
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	int rc;

	/* Since all information is common, only first hwfns should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	/* Port counts are device-wide; learned once by the lead hwfn */
	if (IS_LEAD_HWFN(p_hwfn))
		qed_hw_info_port_num(p_hwfn, p_ptt);

	/* Query MFW capabilities before reading NVM-derived config */
	qed_mcp_get_capabilities(p_hwfn, p_ptt);

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	/* Use the MFW-provided MAC when available; otherwise fall back to a
	 * random locally-administered address.
	 */
	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);

		qed_get_eee_caps(p_hwfn, p_ptt);

		qed_mcp_read_ufp_config(p_hwfn, p_ptt);
	}

	/* Adopt the protocol personality reported by the MFW */
	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
		p_hwfn->hw_info.multi_tc_roce_en = true;

	/* Default traffic-class configuration; one TC active initially */
	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
	p_hwfn->hw_info.num_active_tc = 1;

	qed_get_num_funcs(p_hwfn, p_ptt);

	if (qed_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	return qed_hw_get_resc(p_hwfn, p_ptt);
}
4595fe56b9e6SYuval Mintz
qed_get_dev_info(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)459615582962SRahul Verma static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4597fe56b9e6SYuval Mintz {
459815582962SRahul Verma struct qed_dev *cdev = p_hwfn->cdev;
45999c79ddaaSMintz, Yuval u16 device_id_mask;
4600fe56b9e6SYuval Mintz u32 tmp;
4601fe56b9e6SYuval Mintz
4602fc48b7a6SYuval Mintz /* Read Vendor Id / Device Id */
46031a635e48SYuval Mintz pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
46041a635e48SYuval Mintz pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
46051a635e48SYuval Mintz
46069c79ddaaSMintz, Yuval /* Determine type */
46079c79ddaaSMintz, Yuval device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
46089c79ddaaSMintz, Yuval switch (device_id_mask) {
46099c79ddaaSMintz, Yuval case QED_DEV_ID_MASK_BB:
46109c79ddaaSMintz, Yuval cdev->type = QED_DEV_TYPE_BB;
46119c79ddaaSMintz, Yuval break;
46129c79ddaaSMintz, Yuval case QED_DEV_ID_MASK_AH:
46139c79ddaaSMintz, Yuval cdev->type = QED_DEV_TYPE_AH;
46149c79ddaaSMintz, Yuval break;
46159c79ddaaSMintz, Yuval default:
46169c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
46179c79ddaaSMintz, Yuval return -EBUSY;
46189c79ddaaSMintz, Yuval }
46199c79ddaaSMintz, Yuval
462015582962SRahul Verma cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
462115582962SRahul Verma cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
462215582962SRahul Verma
4623fe56b9e6SYuval Mintz MASK_FIELD(CHIP_REV, cdev->chip_rev);
4624fe56b9e6SYuval Mintz
4625fe56b9e6SYuval Mintz /* Learn number of HW-functions */
462615582962SRahul Verma tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
4627fe56b9e6SYuval Mintz
4628fc48b7a6SYuval Mintz if (tmp & (1 << p_hwfn->rel_pf_id)) {
4629fe56b9e6SYuval Mintz DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
4630fe56b9e6SYuval Mintz cdev->num_hwfns = 2;
4631fe56b9e6SYuval Mintz } else {
4632fe56b9e6SYuval Mintz cdev->num_hwfns = 1;
4633fe56b9e6SYuval Mintz }
4634fe56b9e6SYuval Mintz
463515582962SRahul Verma cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
4636fe56b9e6SYuval Mintz MISCS_REG_CHIP_TEST_REG) >> 4;
4637fe56b9e6SYuval Mintz MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
463815582962SRahul Verma cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
4639fe56b9e6SYuval Mintz MASK_FIELD(CHIP_METAL, cdev->chip_metal);
4640fe56b9e6SYuval Mintz
4641fe56b9e6SYuval Mintz DP_INFO(cdev->hwfns,
46429c79ddaaSMintz, Yuval "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
46439c79ddaaSMintz, Yuval QED_IS_BB(cdev) ? "BB" : "AH",
46449c79ddaaSMintz, Yuval 'A' + cdev->chip_rev,
46459c79ddaaSMintz, Yuval (int)cdev->chip_metal,
4646fe56b9e6SYuval Mintz cdev->chip_num, cdev->chip_rev,
4647fe56b9e6SYuval Mintz cdev->chip_bond_id, cdev->chip_metal);
464812e09c69SYuval Mintz
464912e09c69SYuval Mintz return 0;
4650fe56b9e6SYuval Mintz }
4651fe56b9e6SYuval Mintz
/* Prepare a single hw-function: record its BAR slices, validate chip
 * access, allocate the PTT pool, initialize MCP, read HW/SHMEM config,
 * and set up the init-ops engine. Errors unwind through the goto ladder
 * at the bottom, releasing resources in reverse acquisition order.
 * Returns 0 on success or a negative errno.
 */
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 u64 db_phys_addr,
				 enum qed_pci_personality personality)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;
	p_hwfn->db_phys_addr = db_phys_addr;

	/* VFs follow a dedicated, much shorter prepare flow */
	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		goto err0;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
	 * is called as it sets the ports number in an engine.
	 * A PF-FLR failure here is non-fatal: only a notice is printed and
	 * rc is overwritten by the subsequent steps.
	 */
	if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
	}

	/* NVRAM info initialization and population */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_mcp_nvm_info_populate(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to populate nvm info shadow\n");
			goto err2;
		}
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc)
		goto err3;

	return rc;
err3:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_mcp_nvm_info_free(p_hwfn);
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}
4746fe56b9e6SYuval Mintz
/* Prepare all HW functions of @cdev for use.
 *
 * Prepares the leading hwfn first (which learns the number of hwfns on
 * the device); for dual-engine devices the second hwfn is then prepared
 * at the upper half of each BAR. The personality learned by hwfn 0 is
 * propagated to the second hwfn.
 *
 * Return: 0 on success, negative error code otherwise. On second-hwfn
 * failure, the already-prepared hwfn 0 resources are released (PF only).
 */
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells,
				   cdev->db_phys_addr,
				   personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u64 db_phys_addr;
		u32 offset;

		/* adjust bar offset for second engine */
		offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					 BAR_ID_0) / 2;
		p_regview = cdev->regview + offset;

		offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					 BAR_ID_1) / 2;

		p_doorbell = cdev->doorbells + offset;

		db_phys_addr = cdev->db_phys_addr + offset;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, db_phys_addr,
					   personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_nvm_info_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}
4806fe56b9e6SYuval Mintz
qed_hw_remove(struct qed_dev * cdev)4807fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
4808fe56b9e6SYuval Mintz {
48090fefbfbaSSudarsana Kalluru struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
4810fe56b9e6SYuval Mintz int i;
4811fe56b9e6SYuval Mintz
48120fefbfbaSSudarsana Kalluru if (IS_PF(cdev))
48130fefbfbaSSudarsana Kalluru qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
48140fefbfbaSSudarsana Kalluru QED_OV_DRIVER_STATE_NOT_LOADED);
48150fefbfbaSSudarsana Kalluru
4816fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) {
4817fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4818fe56b9e6SYuval Mintz
48191408cc1fSYuval Mintz if (IS_VF(cdev)) {
48200b55e27dSYuval Mintz qed_vf_pf_release(p_hwfn);
48211408cc1fSYuval Mintz continue;
48221408cc1fSYuval Mintz }
48231408cc1fSYuval Mintz
4824fe56b9e6SYuval Mintz qed_init_free(p_hwfn);
4825fe56b9e6SYuval Mintz qed_hw_hwfn_free(p_hwfn);
4826fe56b9e6SYuval Mintz qed_mcp_free(p_hwfn);
4827fe56b9e6SYuval Mintz }
482832a47e72SYuval Mintz
482932a47e72SYuval Mintz qed_iov_free_hw_info(cdev);
483043645ce0SSudarsana Reddy Kalluru
483113cf8aabSSudarsana Reddy Kalluru qed_mcp_nvm_info_free(p_hwfn);
4832fe56b9e6SYuval Mintz }
4833fe56b9e6SYuval Mintz
/* Translate a PF-relative L2 queue index to its absolute FW index.
 *
 * Return: 0 with the absolute index in *@dst_id, or -EINVAL when
 * @src_id exceeds the L2-queue resources allotted to this PF.
 */
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	u16 lo, hi;

	if (src_id < RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
		return 0;
	}

	lo = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	hi = lo + RESC_NUM(p_hwfn, QED_L2_QUEUE);
	DP_NOTICE(p_hwfn,
		  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
		  src_id, lo, hi);

	return -EINVAL;
}
4852cee4d264SManish Chopra
/* Translate a PF-relative vport index to its absolute FW index.
 *
 * Return: 0 with the absolute index in *@dst_id, or -EINVAL when
 * @src_id exceeds the vport resources allotted to this PF.
 */
int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	u8 lo, hi;

	if (src_id < RESC_NUM(p_hwfn, QED_VPORT)) {
		*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
		return 0;
	}

	lo = (u8)RESC_START(p_hwfn, QED_VPORT);
	hi = lo + RESC_NUM(p_hwfn, QED_VPORT);
	DP_NOTICE(p_hwfn,
		  "vport id [%d] is not valid, available indices [%d - %d]\n",
		  src_id, lo, hi);

	return -EINVAL;
}
4871cee4d264SManish Chopra
/* Translate a PF-relative RSS engine index to its absolute FW index.
 *
 * Return: 0 with the absolute index in *@dst_id, or -EINVAL when
 * @src_id exceeds the RSS-engine resources allotted to this PF.
 */
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	u8 lo, hi;

	if (src_id < RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
		return 0;
	}

	lo = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
	hi = lo + RESC_NUM(p_hwfn, QED_RSS_ENG);
	DP_NOTICE(p_hwfn,
		  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
		  src_id, lo, hi);

	return -EINVAL;
}
4890bcd197c8SManish Chopra
/* Build a coalescing timeset entry in @p_eth_qzone and copy it to the
 * queue zone at @hw_addr. Fails when coalescing was not enabled for
 * the device.
 */
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_timeset = p_eth_qzone;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	memset(p_eth_qzone, 0, eth_qzone_size);
	SET_FIELD(p_timeset->value, COALESCING_TIMESET_VALID, 1);
	SET_FIELD(p_timeset->value, COALESCING_TIMESET_TIMESET, timeset);

	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}
4910722003acSSudarsana Reddy Kalluru
/* Configure Rx and/or Tx interrupt coalescing (usecs) for the queue
 * identified by @p_handle (a struct qed_queue_cid). A zero value leaves
 * the corresponding direction untouched. VFs delegate to their PF.
 */
int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle)
{
	struct qed_queue_cid *p_cid = p_handle;
	struct qed_hwfn *p_hwfn = p_cid->p_owner;
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid);

	ptt = qed_ptt_acquire(p_hwfn);
	if (!ptt)
		return -EAGAIN;

	if (rx_coal) {
		rc = qed_set_rxq_coalesce(p_hwfn, ptt, rx_coal, p_cid);
		if (!rc)
			p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
	}

	if (!rc && tx_coal) {
		rc = qed_set_txq_coalesce(p_hwfn, ptt, tx_coal, p_cid);
		if (!rc)
			p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
	}

	qed_ptt_release(p_hwfn, ptt);

	return rc;
}
4944477f2d14SRahul Verma
/* Program the Rx interrupt coalescing value (usecs) for a queue.
 *
 * Coalesce = (timeset << timer-resolution); timeset is 7 bits wide, so
 * the resolution grows with the requested value (max supported 0x1FF).
 *
 * Fix: the original ended with "if (rc) goto out;" immediately followed
 * by the "out:" label - a dead check that fell through either way. The
 * redundant branch and label are removed; the result of qed_set_coalesce()
 * is returned directly, matching qed_set_txq_coalesce().
 */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				   p_cid->sb_igu_id, false);
	if (rc)
		return rc;

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);

	return qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
				sizeof(struct ustorm_eth_queue_zone), timeset);
}
4983722003acSSudarsana Reddy Kalluru
/* Program the Tx interrupt coalescing value (usecs) for a queue.
 *
 * Coalesce = (timeset << timer-resolution); timeset is 7 bits wide, so
 * the resolution grows with the requested value (max supported 0x1FF).
 */
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timer_res, timeset;
	u32 address;
	int rc;

	/* Pick the smallest timer resolution that fits the request */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}

	timeset = (u8)(coalesce >> timer_res);

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				   p_cid->sb_igu_id, true);
	if (rc)
		return rc;

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);

	return qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
				sizeof(struct xstorm_eth_queue_zone), timeset);
}
5019722003acSSudarsana Reddy Kalluru
5020bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them.
5021bcd197c8SManish Chopra * After this configuration each vport will have
5022bcd197c8SManish Chopra * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
5023bcd197c8SManish Chopra */
qed_configure_wfq_for_all_vports(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 min_pf_rate)5024bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
5025bcd197c8SManish Chopra struct qed_ptt *p_ptt,
5026bcd197c8SManish Chopra u32 min_pf_rate)
5027bcd197c8SManish Chopra {
5028bcd197c8SManish Chopra struct init_qm_vport_params *vport_params;
5029bcd197c8SManish Chopra int i;
5030bcd197c8SManish Chopra
5031bcd197c8SManish Chopra vport_params = p_hwfn->qm_info.qm_vport_params;
5032bcd197c8SManish Chopra
5033bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
5034bcd197c8SManish Chopra u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
5035bcd197c8SManish Chopra
503692fae6fbSMichal Kalderon vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
5037bcd197c8SManish Chopra min_pf_rate;
5038bcd197c8SManish Chopra qed_init_vport_wfq(p_hwfn, p_ptt,
5039bcd197c8SManish Chopra vport_params[i].first_tx_pq_id,
504092fae6fbSMichal Kalderon vport_params[i].wfq);
5041bcd197c8SManish Chopra }
5042bcd197c8SManish Chopra }
5043bcd197c8SManish Chopra
/* Reset every vport's WFQ weight to the neutral default (1).
 * @min_pf_rate is unused by this helper; kept for signature symmetry
 * with the other WFQ routines.
 */
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)

{
	struct init_qm_vport_params *vp = p_hwfn->qm_info.qm_vport_params;
	int vport;

	for (vport = 0; vport < p_hwfn->qm_info.num_vports; vport++)
		vp[vport].wfq = 1;
}
5053bcd197c8SManish Chopra
/* Drop WFQ configuration: reset every vport to the default weight and
 * program that weight to HW.
 *
 * Fix: qed_init_wfq_default_param() itself iterates over *all* vports,
 * yet it was invoked once per loop iteration, making the reset
 * accidentally O(n^2). It is idempotent, so hoisting it above the loop
 * preserves behavior while doing the reset exactly once.
 */
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	/* Reset all vport weights to the default in one pass */
	qed_init_wfq_default_param(p_hwfn, min_pf_rate);

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].wfq);
}
5070bcd197c8SManish Chopra
5071bcd197c8SManish Chopra /* This function performs several validations for WFQ
5072bcd197c8SManish Chopra * configuration and required min rate for a given vport
5073bcd197c8SManish Chopra * 1. req_rate must be greater than one percent of min_pf_rate.
5074bcd197c8SManish Chopra * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
5075bcd197c8SManish Chopra * rates to get less than one percent of min_pf_rate.
5076bcd197c8SManish Chopra * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
5077bcd197c8SManish Chopra */
qed_init_wfq_param(struct qed_hwfn * p_hwfn,u16 vport_id,u32 req_rate,u32 min_pf_rate)5078bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
50791a635e48SYuval Mintz u16 vport_id, u32 req_rate, u32 min_pf_rate)
5080bcd197c8SManish Chopra {
5081bcd197c8SManish Chopra u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
5082bcd197c8SManish Chopra int non_requested_count = 0, req_count = 0, i, num_vports;
5083bcd197c8SManish Chopra
5084bcd197c8SManish Chopra num_vports = p_hwfn->qm_info.num_vports;
5085bcd197c8SManish Chopra
5086*1a9dc561SDaniil Tatianin if (num_vports < 2) {
5087*1a9dc561SDaniil Tatianin DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
5088*1a9dc561SDaniil Tatianin return -EINVAL;
5089*1a9dc561SDaniil Tatianin }
5090*1a9dc561SDaniil Tatianin
5091bcd197c8SManish Chopra /* Accounting for the vports which are configured for WFQ explicitly */
5092bcd197c8SManish Chopra for (i = 0; i < num_vports; i++) {
5093bcd197c8SManish Chopra u32 tmp_speed;
5094bcd197c8SManish Chopra
5095bcd197c8SManish Chopra if ((i != vport_id) &&
5096bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[i].configured) {
5097bcd197c8SManish Chopra req_count++;
5098bcd197c8SManish Chopra tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
5099bcd197c8SManish Chopra total_req_min_rate += tmp_speed;
5100bcd197c8SManish Chopra }
5101bcd197c8SManish Chopra }
5102bcd197c8SManish Chopra
5103bcd197c8SManish Chopra /* Include current vport data as well */
5104bcd197c8SManish Chopra req_count++;
5105bcd197c8SManish Chopra total_req_min_rate += req_rate;
5106bcd197c8SManish Chopra non_requested_count = num_vports - req_count;
5107bcd197c8SManish Chopra
5108bcd197c8SManish Chopra if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
5109bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
5110bcd197c8SManish Chopra "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
5111bcd197c8SManish Chopra vport_id, req_rate, min_pf_rate);
5112bcd197c8SManish Chopra return -EINVAL;
5113bcd197c8SManish Chopra }
5114bcd197c8SManish Chopra
5115bcd197c8SManish Chopra if (num_vports > QED_WFQ_UNIT) {
5116bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
5117bcd197c8SManish Chopra "Number of vports is greater than %d\n",
5118bcd197c8SManish Chopra QED_WFQ_UNIT);
5119bcd197c8SManish Chopra return -EINVAL;
5120bcd197c8SManish Chopra }
5121bcd197c8SManish Chopra
5122bcd197c8SManish Chopra if (total_req_min_rate > min_pf_rate) {
5123bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
5124bcd197c8SManish Chopra "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
5125bcd197c8SManish Chopra total_req_min_rate, min_pf_rate);
5126bcd197c8SManish Chopra return -EINVAL;
5127bcd197c8SManish Chopra }
5128bcd197c8SManish Chopra
5129bcd197c8SManish Chopra total_left_rate = min_pf_rate - total_req_min_rate;
5130bcd197c8SManish Chopra
5131bcd197c8SManish Chopra left_rate_per_vp = total_left_rate / non_requested_count;
5132bcd197c8SManish Chopra if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
5133bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
5134bcd197c8SManish Chopra "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
5135bcd197c8SManish Chopra left_rate_per_vp, min_pf_rate);
5136bcd197c8SManish Chopra return -EINVAL;
5137bcd197c8SManish Chopra }
5138bcd197c8SManish Chopra
5139bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
5140bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[vport_id].configured = true;
5141bcd197c8SManish Chopra
5142bcd197c8SManish Chopra for (i = 0; i < num_vports; i++) {
5143bcd197c8SManish Chopra if (p_hwfn->qm_info.wfq_data[i].configured)
5144bcd197c8SManish Chopra continue;
5145bcd197c8SManish Chopra
5146bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
5147bcd197c8SManish Chopra }
5148bcd197c8SManish Chopra
5149bcd197c8SManish Chopra return 0;
5150bcd197c8SManish Chopra }
5151bcd197c8SManish Chopra
/* Configure the WFQ min rate for a single vport on one hwfn. If the PF
 * min rate is not yet known (link down), only record the request; it is
 * applied later from the link-change path.
 */
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link =
		&p_hwfn->cdev->hwfns[0].mcp_info->link_output;
	int rc;

	/* No PF min rate yet - just remember the request */
	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return 0;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");
		return rc;
	}

	qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, p_link->min_pf_rate);

	return 0;
}
5177733def6aSYuval Mintz
/* Re-validate all previously requested vport WFQ rates against the new
 * PF min rate. If every configured rate still validates, reprogram WFQ
 * for all vports; otherwise (or when nothing is configured) fall back
 * to the default weights.
 */
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool any_configured = false;
	int rc = 0;
	u16 vp;

	/* Validate all pre configured vports for wfq */
	for (vp = 0; vp < p_hwfn->qm_info.num_vports; vp++) {
		u32 min_speed;

		if (!p_hwfn->qm_info.wfq_data[vp].configured)
			continue;

		any_configured = true;
		min_speed = p_hwfn->qm_info.wfq_data[vp].min_speed;

		rc = qed_init_wfq_param(p_hwfn, vp, min_speed, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && any_configured)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}
5211bcd197c8SManish Chopra
5212733def6aSYuval Mintz /* Main API for qed clients to configure vport min rate.
5213733def6aSYuval Mintz * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
5214733def6aSYuval Mintz * rate - Speed in Mbps needs to be assigned to a given vport.
5215733def6aSYuval Mintz */
qed_configure_vport_wfq(struct qed_dev * cdev,u16 vp_id,u32 rate)5216733def6aSYuval Mintz int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
5217733def6aSYuval Mintz {
5218733def6aSYuval Mintz int i, rc = -EINVAL;
5219733def6aSYuval Mintz
5220733def6aSYuval Mintz /* Currently not supported; Might change in future */
5221733def6aSYuval Mintz if (cdev->num_hwfns > 1) {
5222733def6aSYuval Mintz DP_NOTICE(cdev,
5223733def6aSYuval Mintz "WFQ configuration is not supported for this device\n");
5224733def6aSYuval Mintz return rc;
5225733def6aSYuval Mintz }
5226733def6aSYuval Mintz
5227733def6aSYuval Mintz for_each_hwfn(cdev, i) {
5228733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5229733def6aSYuval Mintz struct qed_ptt *p_ptt;
5230733def6aSYuval Mintz
5231733def6aSYuval Mintz p_ptt = qed_ptt_acquire(p_hwfn);
5232733def6aSYuval Mintz if (!p_ptt)
5233733def6aSYuval Mintz return -EBUSY;
5234733def6aSYuval Mintz
5235733def6aSYuval Mintz rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
5236733def6aSYuval Mintz
5237d572c430SYuval Mintz if (rc) {
5238733def6aSYuval Mintz qed_ptt_release(p_hwfn, p_ptt);
5239733def6aSYuval Mintz return rc;
5240733def6aSYuval Mintz }
5241733def6aSYuval Mintz
5242733def6aSYuval Mintz qed_ptt_release(p_hwfn, p_ptt);
5243733def6aSYuval Mintz }
5244733def6aSYuval Mintz
5245733def6aSYuval Mintz return rc;
5246733def6aSYuval Mintz }
5247733def6aSYuval Mintz
5248bcd197c8SManish Chopra /* API to configure WFQ from mcp link change */
qed_configure_vp_wfq_on_link_change(struct qed_dev * cdev,struct qed_ptt * p_ptt,u32 min_pf_rate)52496f437d43SMintz, Yuval void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
52506f437d43SMintz, Yuval struct qed_ptt *p_ptt, u32 min_pf_rate)
5251bcd197c8SManish Chopra {
5252bcd197c8SManish Chopra int i;
5253bcd197c8SManish Chopra
52543e7cfce2SYuval Mintz if (cdev->num_hwfns > 1) {
52553e7cfce2SYuval Mintz DP_VERBOSE(cdev,
52563e7cfce2SYuval Mintz NETIF_MSG_LINK,
52573e7cfce2SYuval Mintz "WFQ configuration is not supported for this device\n");
52583e7cfce2SYuval Mintz return;
52593e7cfce2SYuval Mintz }
52603e7cfce2SYuval Mintz
5261bcd197c8SManish Chopra for_each_hwfn(cdev, i) {
5262bcd197c8SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5263bcd197c8SManish Chopra
52646f437d43SMintz, Yuval __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
5265bcd197c8SManish Chopra min_pf_rate);
5266bcd197c8SManish Chopra }
5267bcd197c8SManish Chopra }
52684b01e519SManish Chopra
/* Apply a PF max-bandwidth percentage (@max_bw, 1-100) on one hwfn:
 * records the value and programs the PF rate limiter as a fraction of
 * the current line speed. Skipped while link speed is unknown unless
 * the cap is being lifted (100%).
 */
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return 0;

	p_link->speed = (p_link->line_speed * max_bw) / 100;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	p_hwfn->qm_info.pf_rl = (max_bw == 100) ? 100000 : p_link->speed;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}
53004b01e519SManish Chopra
53014b01e519SManish Chopra /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
qed_configure_pf_max_bandwidth(struct qed_dev * cdev,u8 max_bw)53024b01e519SManish Chopra int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
53034b01e519SManish Chopra {
53044b01e519SManish Chopra int i, rc = -EINVAL;
53054b01e519SManish Chopra
53064b01e519SManish Chopra if (max_bw < 1 || max_bw > 100) {
53074b01e519SManish Chopra DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
53084b01e519SManish Chopra return rc;
53094b01e519SManish Chopra }
53104b01e519SManish Chopra
53114b01e519SManish Chopra for_each_hwfn(cdev, i) {
53124b01e519SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
53134b01e519SManish Chopra struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
53144b01e519SManish Chopra struct qed_mcp_link_state *p_link;
53154b01e519SManish Chopra struct qed_ptt *p_ptt;
53164b01e519SManish Chopra
53174b01e519SManish Chopra p_link = &p_lead->mcp_info->link_output;
53184b01e519SManish Chopra
53194b01e519SManish Chopra p_ptt = qed_ptt_acquire(p_hwfn);
53204b01e519SManish Chopra if (!p_ptt)
53214b01e519SManish Chopra return -EBUSY;
53224b01e519SManish Chopra
53234b01e519SManish Chopra rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
53244b01e519SManish Chopra p_link, max_bw);
53254b01e519SManish Chopra
53264b01e519SManish Chopra qed_ptt_release(p_hwfn, p_ptt);
53274b01e519SManish Chopra
53284b01e519SManish Chopra if (rc)
53294b01e519SManish Chopra break;
53304b01e519SManish Chopra }
53314b01e519SManish Chopra
53324b01e519SManish Chopra return rc;
53334b01e519SManish Chopra }
5334a64b02d5SManish Chopra
/* Apply a PF min-bandwidth percentage (@min_bw, 1-100) on one hwfn:
 * records the value and programs the PF WFQ weight. The derived min
 * rate is only computed once the line speed is known.
 */
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return 0;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}
5358a64b02d5SManish Chopra
5359a64b02d5SManish Chopra /* Main API to configure PF min bandwidth where bw range is [1-100] */
qed_configure_pf_min_bandwidth(struct qed_dev * cdev,u8 min_bw)5360a64b02d5SManish Chopra int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
5361a64b02d5SManish Chopra {
5362a64b02d5SManish Chopra int i, rc = -EINVAL;
5363a64b02d5SManish Chopra
5364a64b02d5SManish Chopra if (min_bw < 1 || min_bw > 100) {
5365a64b02d5SManish Chopra DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
5366a64b02d5SManish Chopra return rc;
5367a64b02d5SManish Chopra }
5368a64b02d5SManish Chopra
5369a64b02d5SManish Chopra for_each_hwfn(cdev, i) {
5370a64b02d5SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5371a64b02d5SManish Chopra struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
5372a64b02d5SManish Chopra struct qed_mcp_link_state *p_link;
5373a64b02d5SManish Chopra struct qed_ptt *p_ptt;
5374a64b02d5SManish Chopra
5375a64b02d5SManish Chopra p_link = &p_lead->mcp_info->link_output;
5376a64b02d5SManish Chopra
5377a64b02d5SManish Chopra p_ptt = qed_ptt_acquire(p_hwfn);
5378a64b02d5SManish Chopra if (!p_ptt)
5379a64b02d5SManish Chopra return -EBUSY;
5380a64b02d5SManish Chopra
5381a64b02d5SManish Chopra rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
5382a64b02d5SManish Chopra p_link, min_bw);
5383a64b02d5SManish Chopra if (rc) {
5384a64b02d5SManish Chopra qed_ptt_release(p_hwfn, p_ptt);
5385a64b02d5SManish Chopra return rc;
5386a64b02d5SManish Chopra }
5387a64b02d5SManish Chopra
5388a64b02d5SManish Chopra if (p_link->min_pf_rate) {
5389a64b02d5SManish Chopra u32 min_rate = p_link->min_pf_rate;
5390a64b02d5SManish Chopra
5391a64b02d5SManish Chopra rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
5392a64b02d5SManish Chopra p_ptt,
5393a64b02d5SManish Chopra min_rate);
5394a64b02d5SManish Chopra }
5395a64b02d5SManish Chopra
5396a64b02d5SManish Chopra qed_ptt_release(p_hwfn, p_ptt);
5397a64b02d5SManish Chopra }
5398a64b02d5SManish Chopra
5399a64b02d5SManish Chopra return rc;
5400a64b02d5SManish Chopra }
5401733def6aSYuval Mintz
qed_clean_wfq_db(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt)5402733def6aSYuval Mintz void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
5403733def6aSYuval Mintz {
5404733def6aSYuval Mintz struct qed_mcp_link_state *p_link;
5405733def6aSYuval Mintz
5406733def6aSYuval Mintz p_link = &p_hwfn->mcp_info->link_output;
5407733def6aSYuval Mintz
5408733def6aSYuval Mintz if (p_link->min_pf_rate)
5409733def6aSYuval Mintz qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
5410733def6aSYuval Mintz p_link->min_pf_rate);
5411733def6aSYuval Mintz
5412733def6aSYuval Mintz memset(p_hwfn->qm_info.wfq_data, 0,
5413733def6aSYuval Mintz sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
5414733def6aSYuval Mintz }
54159c79ddaaSMintz, Yuval
/* Return the device's port count as recorded in @cdev. */
int qed_device_num_ports(struct qed_dev *cdev)
{
	return cdev->num_ports;
}
5420456a5849SKalderon, Michal
/* Pack a 6-byte MAC address into three 16-bit firmware words, swapping
 * the bytes within each pair: word[0] gets mac[2i+1], word[1] gets mac[2i].
 */
void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
{
	u8 *words[] = { (u8 *)fw_msb, (u8 *)fw_mid, (u8 *)fw_lsb };
	int i;

	for (i = 0; i < 3; i++) {
		words[i][0] = mac[2 * i + 1];
		words[i][1] = mac[2 * i];
	}
}
5431203d136eSPrabhakar Kushwaha
/* Clear the driver-side (shadow) copy of all LLH filters of @ppfid.
 * Return: 0 on success, errno if the shadow sanity check fails.
 */
static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid)
{
	struct qed_llh_filter_info *filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all");
	if (rc)
		return rc;

	filters = cdev->p_llh_info->pp_filters[ppfid];
	memset(filters, 0,
	       NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*filters));

	return 0;
}
5448203d136eSPrabhakar Kushwaha
/* Remove every LLH filter of a given ppfid, both from the driver shadow
 * and from the NIG hardware.  Best effort: any intermediate failure
 * aborts the cleanup silently.
 */
static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u8 abs_ppfid, filter_idx;

	if (!p_ptt)
		return;

	/* Filters are only meaningful in LLH-classification MF modes. */
	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
	    !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	/* Resolve the absolute ppfid, then drop the shadow entries. */
	if (qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid) ||
	    qed_llh_shadow_remove_all_filters(cdev, ppfid))
		goto out;

	/* Remove each hardware filter slot; stop on the first failure. */
	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
	     filter_idx++) {
		if (qed_llh_remove_filter(p_hwfn, p_ptt,
					  abs_ppfid, filter_idx))
			break;
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
}
5481203d136eSPrabhakar Kushwaha
/* Add an LLH classification filter matching TCP source port @src_port
 * on the default ppfid.
 */
int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
{
	return qed_llh_add_protocol_filter(cdev, 0,
					   QED_LLH_FILTER_TCP_SRC_PORT,
					   src_port, QED_LLH_DONT_CARE);
}
5488203d136eSPrabhakar Kushwaha
/* Remove the LLH filter matching TCP source port @src_port on the
 * default ppfid.
 */
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
{
	qed_llh_remove_protocol_filter(cdev, 0,
				       QED_LLH_FILTER_TCP_SRC_PORT,
				       src_port, QED_LLH_DONT_CARE);
}
5495203d136eSPrabhakar Kushwaha
/* Add an LLH classification filter matching TCP destination port
 * @dest_port on the default ppfid.
 */
int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
{
	return qed_llh_add_protocol_filter(cdev, 0,
					   QED_LLH_FILTER_TCP_DEST_PORT,
					   QED_LLH_DONT_CARE, dest_port);
}
5502203d136eSPrabhakar Kushwaha
/* Remove the LLH filter matching TCP destination port @dest_port on the
 * default ppfid.
 */
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
{
	qed_llh_remove_protocol_filter(cdev, 0,
				       QED_LLH_FILTER_TCP_DEST_PORT,
				       QED_LLH_DONT_CARE, dest_port);
}
5509203d136eSPrabhakar Kushwaha
qed_llh_clear_all_filters(struct qed_dev * cdev)5510203d136eSPrabhakar Kushwaha void qed_llh_clear_all_filters(struct qed_dev *cdev)
5511203d136eSPrabhakar Kushwaha {
5512203d136eSPrabhakar Kushwaha u8 ppfid;
5513203d136eSPrabhakar Kushwaha
5514203d136eSPrabhakar Kushwaha if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
5515203d136eSPrabhakar Kushwaha !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
5516203d136eSPrabhakar Kushwaha return;
5517203d136eSPrabhakar Kushwaha
5518203d136eSPrabhakar Kushwaha for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++)
5519203d136eSPrabhakar Kushwaha qed_llh_clear_ppfid_filters(cdev, ppfid);
5520203d136eSPrabhakar Kushwaha }
5521