1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver 2e8f1cb50SMintz, Yuval * Copyright (c) 2015-2017 QLogic Corporation 3fe56b9e6SYuval Mintz * 4e8f1cb50SMintz, Yuval * This software is available to you under a choice of one of two 5e8f1cb50SMintz, Yuval * licenses. You may choose to be licensed under the terms of the GNU 6e8f1cb50SMintz, Yuval * General Public License (GPL) Version 2, available from the file 7e8f1cb50SMintz, Yuval * COPYING in the main directory of this source tree, or the 8e8f1cb50SMintz, Yuval * OpenIB.org BSD license below: 9e8f1cb50SMintz, Yuval * 10e8f1cb50SMintz, Yuval * Redistribution and use in source and binary forms, with or 11e8f1cb50SMintz, Yuval * without modification, are permitted provided that the following 12e8f1cb50SMintz, Yuval * conditions are met: 13e8f1cb50SMintz, Yuval * 14e8f1cb50SMintz, Yuval * - Redistributions of source code must retain the above 15e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 16e8f1cb50SMintz, Yuval * disclaimer. 17e8f1cb50SMintz, Yuval * 18e8f1cb50SMintz, Yuval * - Redistributions in binary form must reproduce the above 19e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 20e8f1cb50SMintz, Yuval * disclaimer in the documentation and /or other materials 21e8f1cb50SMintz, Yuval * provided with the distribution. 22e8f1cb50SMintz, Yuval * 23e8f1cb50SMintz, Yuval * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f1cb50SMintz, Yuval * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f1cb50SMintz, Yuval * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f1cb50SMintz, Yuval * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f1cb50SMintz, Yuval * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f1cb50SMintz, Yuval * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f1cb50SMintz, Yuval * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f1cb50SMintz, Yuval * SOFTWARE. 31fe56b9e6SYuval Mintz */ 32fe56b9e6SYuval Mintz 33fe56b9e6SYuval Mintz #include <linux/types.h> 34fe56b9e6SYuval Mintz #include <asm/byteorder.h> 35fe56b9e6SYuval Mintz #include <linux/io.h> 36fe56b9e6SYuval Mintz #include <linux/delay.h> 37fe56b9e6SYuval Mintz #include <linux/dma-mapping.h> 38fe56b9e6SYuval Mintz #include <linux/errno.h> 39fe56b9e6SYuval Mintz #include <linux/kernel.h> 40fe56b9e6SYuval Mintz #include <linux/mutex.h> 41fe56b9e6SYuval Mintz #include <linux/pci.h> 42fe56b9e6SYuval Mintz #include <linux/slab.h> 43fe56b9e6SYuval Mintz #include <linux/string.h> 44a91eb52aSYuval Mintz #include <linux/vmalloc.h> 45fe56b9e6SYuval Mintz #include <linux/etherdevice.h> 46fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h> 47fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h> 48fe56b9e6SYuval Mintz #include "qed.h" 49fe56b9e6SYuval Mintz #include "qed_cxt.h" 5039651abdSSudarsana Reddy Kalluru #include "qed_dcbx.h" 51fe56b9e6SYuval Mintz #include "qed_dev_api.h" 521e128c81SArun Easi #include "qed_fcoe.h" 53fe56b9e6SYuval Mintz #include "qed_hsi.h" 54fe56b9e6SYuval Mintz #include "qed_hw.h" 55fe56b9e6SYuval Mintz #include "qed_init_ops.h" 56fe56b9e6SYuval Mintz #include "qed_int.h" 57fc831825SYuval Mintz #include "qed_iscsi.h" 580a7fb11cSYuval Mintz #include "qed_ll2.h" 59fe56b9e6SYuval Mintz #include "qed_mcp.h" 601d6cff4fSYuval Mintz #include "qed_ooo.h" 61fe56b9e6SYuval Mintz #include "qed_reg_addr.h" 62fe56b9e6SYuval Mintz #include "qed_sp.h" 6332a47e72SYuval Mintz #include "qed_sriov.h" 640b55e27dSYuval Mintz #include "qed_vf.h" 6551ff1725SRam Amrani #include "qed_roce.h" 66fe56b9e6SYuval Mintz 
670caf5b26SWei Yongjun static DEFINE_SPINLOCK(qm_lock); 6839651abdSSudarsana Reddy Kalluru 6951ff1725SRam Amrani #define QED_MIN_DPIS (4) 7051ff1725SRam Amrani #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) 7151ff1725SRam Amrani 72fe56b9e6SYuval Mintz /* API common to all protocols */ 73c2035eeaSRam Amrani enum BAR_ID { 74c2035eeaSRam Amrani BAR_ID_0, /* used for GRC */ 75c2035eeaSRam Amrani BAR_ID_1 /* Used for doorbells */ 76c2035eeaSRam Amrani }; 77c2035eeaSRam Amrani 7815582962SRahul Verma static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, 7915582962SRahul Verma struct qed_ptt *p_ptt, enum BAR_ID bar_id) 80c2035eeaSRam Amrani { 81c2035eeaSRam Amrani u32 bar_reg = (bar_id == BAR_ID_0 ? 82c2035eeaSRam Amrani PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 831408cc1fSYuval Mintz u32 val; 84c2035eeaSRam Amrani 851408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 861408cc1fSYuval Mintz return 1 << 17; 871408cc1fSYuval Mintz 8815582962SRahul Verma val = qed_rd(p_hwfn, p_ptt, bar_reg); 89c2035eeaSRam Amrani if (val) 90c2035eeaSRam Amrani return 1 << (val + 15); 91c2035eeaSRam Amrani 92c2035eeaSRam Amrani /* Old MFW initialized above registered only conditionally */ 93c2035eeaSRam Amrani if (p_hwfn->cdev->num_hwfns > 1) { 94c2035eeaSRam Amrani DP_INFO(p_hwfn, 95c2035eeaSRam Amrani "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 96c2035eeaSRam Amrani return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 97c2035eeaSRam Amrani } else { 98c2035eeaSRam Amrani DP_INFO(p_hwfn, 99c2035eeaSRam Amrani "BAR size not configured. 
Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 100c2035eeaSRam Amrani return 512 * 1024; 101c2035eeaSRam Amrani } 102c2035eeaSRam Amrani } 103c2035eeaSRam Amrani 1041a635e48SYuval Mintz void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) 105fe56b9e6SYuval Mintz { 106fe56b9e6SYuval Mintz u32 i; 107fe56b9e6SYuval Mintz 108fe56b9e6SYuval Mintz cdev->dp_level = dp_level; 109fe56b9e6SYuval Mintz cdev->dp_module = dp_module; 110fe56b9e6SYuval Mintz for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 111fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 112fe56b9e6SYuval Mintz 113fe56b9e6SYuval Mintz p_hwfn->dp_level = dp_level; 114fe56b9e6SYuval Mintz p_hwfn->dp_module = dp_module; 115fe56b9e6SYuval Mintz } 116fe56b9e6SYuval Mintz } 117fe56b9e6SYuval Mintz 118fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev) 119fe56b9e6SYuval Mintz { 120fe56b9e6SYuval Mintz u8 i; 121fe56b9e6SYuval Mintz 122fe56b9e6SYuval Mintz for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 123fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 124fe56b9e6SYuval Mintz 125fe56b9e6SYuval Mintz p_hwfn->cdev = cdev; 126fe56b9e6SYuval Mintz p_hwfn->my_id = i; 127fe56b9e6SYuval Mintz p_hwfn->b_active = false; 128fe56b9e6SYuval Mintz 129fe56b9e6SYuval Mintz mutex_init(&p_hwfn->dmae_info.mutex); 130fe56b9e6SYuval Mintz } 131fe56b9e6SYuval Mintz 132fe56b9e6SYuval Mintz /* hwfn 0 is always active */ 133fe56b9e6SYuval Mintz cdev->hwfns[0].b_active = true; 134fe56b9e6SYuval Mintz 135fe56b9e6SYuval Mintz /* set the default cache alignment to 128 */ 136fe56b9e6SYuval Mintz cdev->cache_shift = 7; 137fe56b9e6SYuval Mintz } 138fe56b9e6SYuval Mintz 139fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn) 140fe56b9e6SYuval Mintz { 141fe56b9e6SYuval Mintz struct qed_qm_info *qm_info = &p_hwfn->qm_info; 142fe56b9e6SYuval Mintz 143fe56b9e6SYuval Mintz kfree(qm_info->qm_pq_params); 144fe56b9e6SYuval Mintz qm_info->qm_pq_params = NULL; 145fe56b9e6SYuval 
Mintz kfree(qm_info->qm_vport_params); 146fe56b9e6SYuval Mintz qm_info->qm_vport_params = NULL; 147fe56b9e6SYuval Mintz kfree(qm_info->qm_port_params); 148fe56b9e6SYuval Mintz qm_info->qm_port_params = NULL; 149bcd197c8SManish Chopra kfree(qm_info->wfq_data); 150bcd197c8SManish Chopra qm_info->wfq_data = NULL; 151fe56b9e6SYuval Mintz } 152fe56b9e6SYuval Mintz 153fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev) 154fe56b9e6SYuval Mintz { 155fe56b9e6SYuval Mintz int i; 156fe56b9e6SYuval Mintz 1571408cc1fSYuval Mintz if (IS_VF(cdev)) 1581408cc1fSYuval Mintz return; 1591408cc1fSYuval Mintz 160fe56b9e6SYuval Mintz kfree(cdev->fw_data); 161fe56b9e6SYuval Mintz cdev->fw_data = NULL; 162fe56b9e6SYuval Mintz 163fe56b9e6SYuval Mintz kfree(cdev->reset_stats); 1643587cb87STomer Tayar cdev->reset_stats = NULL; 165fe56b9e6SYuval Mintz 166fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 167fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 168fe56b9e6SYuval Mintz 169fe56b9e6SYuval Mintz qed_cxt_mngr_free(p_hwfn); 170fe56b9e6SYuval Mintz qed_qm_info_free(p_hwfn); 171fe56b9e6SYuval Mintz qed_spq_free(p_hwfn); 1723587cb87STomer Tayar qed_eq_free(p_hwfn); 1733587cb87STomer Tayar qed_consq_free(p_hwfn); 174fe56b9e6SYuval Mintz qed_int_free(p_hwfn); 1750a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 1763587cb87STomer Tayar qed_ll2_free(p_hwfn); 1770a7fb11cSYuval Mintz #endif 1781e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 1793587cb87STomer Tayar qed_fcoe_free(p_hwfn); 1801e128c81SArun Easi 1811d6cff4fSYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 1823587cb87STomer Tayar qed_iscsi_free(p_hwfn); 1833587cb87STomer Tayar qed_ooo_free(p_hwfn); 1841d6cff4fSYuval Mintz } 18532a47e72SYuval Mintz qed_iov_free(p_hwfn); 186fe56b9e6SYuval Mintz qed_dmae_info_free(p_hwfn); 187270837b3Ssudarsana.kalluru@cavium.com qed_dcbx_info_free(p_hwfn); 188fe56b9e6SYuval Mintz } 189fe56b9e6SYuval Mintz } 190fe56b9e6SYuval Mintz 191b5a9ee7cSAriel Elior 
/******************** QM initialization *******************/ 192b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP 0x9f 193b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP_4PORT_K2 0xf 194b5a9ee7cSAriel Elior 195b5a9ee7cSAriel Elior /* determines the physical queue flags for a given PF. */ 196b5a9ee7cSAriel Elior static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) 197fe56b9e6SYuval Mintz { 198b5a9ee7cSAriel Elior u32 flags; 199fe56b9e6SYuval Mintz 200b5a9ee7cSAriel Elior /* common flags */ 201b5a9ee7cSAriel Elior flags = PQ_FLAGS_LB; 202fe56b9e6SYuval Mintz 203b5a9ee7cSAriel Elior /* feature flags */ 204b5a9ee7cSAriel Elior if (IS_QED_SRIOV(p_hwfn->cdev)) 205b5a9ee7cSAriel Elior flags |= PQ_FLAGS_VFS; 206fe56b9e6SYuval Mintz 207b5a9ee7cSAriel Elior /* protocol flags */ 208b5a9ee7cSAriel Elior switch (p_hwfn->hw_info.personality) { 209b5a9ee7cSAriel Elior case QED_PCI_ETH: 210b5a9ee7cSAriel Elior flags |= PQ_FLAGS_MCOS; 211b5a9ee7cSAriel Elior break; 212b5a9ee7cSAriel Elior case QED_PCI_FCOE: 213b5a9ee7cSAriel Elior flags |= PQ_FLAGS_OFLD; 214b5a9ee7cSAriel Elior break; 215b5a9ee7cSAriel Elior case QED_PCI_ISCSI: 216b5a9ee7cSAriel Elior flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 217b5a9ee7cSAriel Elior break; 218b5a9ee7cSAriel Elior case QED_PCI_ETH_ROCE: 219b5a9ee7cSAriel Elior flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 220b5a9ee7cSAriel Elior break; 221b5a9ee7cSAriel Elior default: 222fe56b9e6SYuval Mintz DP_ERR(p_hwfn, 223b5a9ee7cSAriel Elior "unknown personality %d\n", p_hwfn->hw_info.personality); 224b5a9ee7cSAriel Elior return 0; 225fe56b9e6SYuval Mintz } 226fe56b9e6SYuval Mintz 227b5a9ee7cSAriel Elior return flags; 228b5a9ee7cSAriel Elior } 229b5a9ee7cSAriel Elior 230b5a9ee7cSAriel Elior /* Getters for resource amounts necessary for qm initialization */ 231b5a9ee7cSAriel Elior u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) 232b5a9ee7cSAriel Elior { 233b5a9ee7cSAriel Elior return p_hwfn->hw_info.num_hw_tc; 234b5a9ee7cSAriel Elior } 
235b5a9ee7cSAriel Elior 236b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) 237b5a9ee7cSAriel Elior { 238b5a9ee7cSAriel Elior return IS_QED_SRIOV(p_hwfn->cdev) ? 239b5a9ee7cSAriel Elior p_hwfn->cdev->p_iov_info->total_vfs : 0; 240b5a9ee7cSAriel Elior } 241b5a9ee7cSAriel Elior 242b5a9ee7cSAriel Elior #define NUM_DEFAULT_RLS 1 243b5a9ee7cSAriel Elior 244b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) 245b5a9ee7cSAriel Elior { 246b5a9ee7cSAriel Elior u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 247b5a9ee7cSAriel Elior 248b5a9ee7cSAriel Elior /* num RLs can't exceed resource amount of rls or vports */ 249b5a9ee7cSAriel Elior num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), 250b5a9ee7cSAriel Elior RESC_NUM(p_hwfn, QED_VPORT)); 251b5a9ee7cSAriel Elior 252b5a9ee7cSAriel Elior /* Make sure after we reserve there's something left */ 253b5a9ee7cSAriel Elior if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) 254b5a9ee7cSAriel Elior return 0; 255b5a9ee7cSAriel Elior 256b5a9ee7cSAriel Elior /* subtract rls necessary for VFs and one default one for the PF */ 257b5a9ee7cSAriel Elior num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 258b5a9ee7cSAriel Elior 259b5a9ee7cSAriel Elior return num_pf_rls; 260b5a9ee7cSAriel Elior } 261b5a9ee7cSAriel Elior 262b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) 263b5a9ee7cSAriel Elior { 264b5a9ee7cSAriel Elior u32 pq_flags = qed_get_pq_flags(p_hwfn); 265b5a9ee7cSAriel Elior 266b5a9ee7cSAriel Elior /* all pqs share the same vport, except for vfs and pf_rl pqs */ 267b5a9ee7cSAriel Elior return (!!(PQ_FLAGS_RLS & pq_flags)) * 268b5a9ee7cSAriel Elior qed_init_qm_get_num_pf_rls(p_hwfn) + 269b5a9ee7cSAriel Elior (!!(PQ_FLAGS_VFS & pq_flags)) * 270b5a9ee7cSAriel Elior qed_init_qm_get_num_vfs(p_hwfn) + 1; 271b5a9ee7cSAriel Elior } 272b5a9ee7cSAriel Elior 273b5a9ee7cSAriel Elior /* calc amount of PQs according to the requested flags */ 274b5a9ee7cSAriel Elior u16 
qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) 275b5a9ee7cSAriel Elior { 276b5a9ee7cSAriel Elior u32 pq_flags = qed_get_pq_flags(p_hwfn); 277b5a9ee7cSAriel Elior 278b5a9ee7cSAriel Elior return (!!(PQ_FLAGS_RLS & pq_flags)) * 279b5a9ee7cSAriel Elior qed_init_qm_get_num_pf_rls(p_hwfn) + 280b5a9ee7cSAriel Elior (!!(PQ_FLAGS_MCOS & pq_flags)) * 281b5a9ee7cSAriel Elior qed_init_qm_get_num_tcs(p_hwfn) + 282b5a9ee7cSAriel Elior (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + 283b5a9ee7cSAriel Elior (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) + 284b5a9ee7cSAriel Elior (!!(PQ_FLAGS_LLT & pq_flags)) + 285b5a9ee7cSAriel Elior (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); 286b5a9ee7cSAriel Elior } 287b5a9ee7cSAriel Elior 288b5a9ee7cSAriel Elior /* initialize the top level QM params */ 289b5a9ee7cSAriel Elior static void qed_init_qm_params(struct qed_hwfn *p_hwfn) 290b5a9ee7cSAriel Elior { 291b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 292b5a9ee7cSAriel Elior bool four_port; 293b5a9ee7cSAriel Elior 294b5a9ee7cSAriel Elior /* pq and vport bases for this PF */ 295b5a9ee7cSAriel Elior qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); 296b5a9ee7cSAriel Elior qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); 297b5a9ee7cSAriel Elior 298b5a9ee7cSAriel Elior /* rate limiting and weighted fair queueing are always enabled */ 299b5a9ee7cSAriel Elior qm_info->vport_rl_en = 1; 300b5a9ee7cSAriel Elior qm_info->vport_wfq_en = 1; 301b5a9ee7cSAriel Elior 302b5a9ee7cSAriel Elior /* TC config is different for AH 4 port */ 303b5a9ee7cSAriel Elior four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2; 304b5a9ee7cSAriel Elior 305b5a9ee7cSAriel Elior /* in AH 4 port we have fewer TCs per port */ 306b5a9ee7cSAriel Elior qm_info->max_phys_tcs_per_port = four_port ? 
NUM_PHYS_TCS_4PORT_K2 : 307b5a9ee7cSAriel Elior NUM_OF_PHYS_TCS; 308b5a9ee7cSAriel Elior 309b5a9ee7cSAriel Elior /* unless MFW indicated otherwise, ooo_tc == 3 for 310b5a9ee7cSAriel Elior * AH 4-port and 4 otherwise. 311fe56b9e6SYuval Mintz */ 312b5a9ee7cSAriel Elior if (!qm_info->ooo_tc) 313b5a9ee7cSAriel Elior qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : 314b5a9ee7cSAriel Elior DCBX_TCP_OOO_TC; 315dbb799c3SYuval Mintz } 316dbb799c3SYuval Mintz 317b5a9ee7cSAriel Elior /* initialize qm vport params */ 318b5a9ee7cSAriel Elior static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) 319b5a9ee7cSAriel Elior { 320b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 321b5a9ee7cSAriel Elior u8 i; 322fe56b9e6SYuval Mintz 323b5a9ee7cSAriel Elior /* all vports participate in weighted fair queueing */ 324b5a9ee7cSAriel Elior for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) 325b5a9ee7cSAriel Elior qm_info->qm_vport_params[i].vport_wfq = 1; 326fe56b9e6SYuval Mintz } 327fe56b9e6SYuval Mintz 328b5a9ee7cSAriel Elior /* initialize qm port params */ 329b5a9ee7cSAriel Elior static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) 330b5a9ee7cSAriel Elior { 331fe56b9e6SYuval Mintz /* Initialize qm port parameters */ 332b5a9ee7cSAriel Elior u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines; 333b5a9ee7cSAriel Elior 334b5a9ee7cSAriel Elior /* indicate how ooo and high pri traffic is dealt with */ 335b5a9ee7cSAriel Elior active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 
336b5a9ee7cSAriel Elior ACTIVE_TCS_BMAP_4PORT_K2 : 337b5a9ee7cSAriel Elior ACTIVE_TCS_BMAP; 338b5a9ee7cSAriel Elior 339fe56b9e6SYuval Mintz for (i = 0; i < num_ports; i++) { 340b5a9ee7cSAriel Elior struct init_qm_port_params *p_qm_port = 341b5a9ee7cSAriel Elior &p_hwfn->qm_info.qm_port_params[i]; 342b5a9ee7cSAriel Elior 343fe56b9e6SYuval Mintz p_qm_port->active = 1; 344b5a9ee7cSAriel Elior p_qm_port->active_phys_tcs = active_phys_tcs; 345fe56b9e6SYuval Mintz p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 346fe56b9e6SYuval Mintz p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 347fe56b9e6SYuval Mintz } 348b5a9ee7cSAriel Elior } 349fe56b9e6SYuval Mintz 350b5a9ee7cSAriel Elior /* Reset the params which must be reset for qm init. QM init may be called as 351b5a9ee7cSAriel Elior * a result of flows other than driver load (e.g. dcbx renegotiation). Other 352b5a9ee7cSAriel Elior * params may be affected by the init but would simply recalculate to the same 353b5a9ee7cSAriel Elior * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 354b5a9ee7cSAriel Elior * affected as these amounts stay the same. 
355b5a9ee7cSAriel Elior */ 356b5a9ee7cSAriel Elior static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) 357b5a9ee7cSAriel Elior { 358b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 359fe56b9e6SYuval Mintz 360b5a9ee7cSAriel Elior qm_info->num_pqs = 0; 361b5a9ee7cSAriel Elior qm_info->num_vports = 0; 362b5a9ee7cSAriel Elior qm_info->num_pf_rls = 0; 363b5a9ee7cSAriel Elior qm_info->num_vf_pqs = 0; 364b5a9ee7cSAriel Elior qm_info->first_vf_pq = 0; 365b5a9ee7cSAriel Elior qm_info->first_mcos_pq = 0; 366b5a9ee7cSAriel Elior qm_info->first_rl_pq = 0; 367b5a9ee7cSAriel Elior } 368fe56b9e6SYuval Mintz 369b5a9ee7cSAriel Elior static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) 370b5a9ee7cSAriel Elior { 371b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 372b5a9ee7cSAriel Elior 373b5a9ee7cSAriel Elior qm_info->num_vports++; 374b5a9ee7cSAriel Elior 375b5a9ee7cSAriel Elior if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) 376b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 377b5a9ee7cSAriel Elior "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", 378b5a9ee7cSAriel Elior qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); 379b5a9ee7cSAriel Elior } 380b5a9ee7cSAriel Elior 381b5a9ee7cSAriel Elior /* initialize a single pq and manage qm_info resources accounting. 382b5a9ee7cSAriel Elior * The pq_init_flags param determines whether the PQ is rate limited 383b5a9ee7cSAriel Elior * (for VF or PF) and whether a new vport is allocated to the pq or not 384b5a9ee7cSAriel Elior * (i.e. vport will be shared). 
385b5a9ee7cSAriel Elior */ 386b5a9ee7cSAriel Elior 387b5a9ee7cSAriel Elior /* flags for pq init */ 388b5a9ee7cSAriel Elior #define PQ_INIT_SHARE_VPORT (1 << 0) 389b5a9ee7cSAriel Elior #define PQ_INIT_PF_RL (1 << 1) 390b5a9ee7cSAriel Elior #define PQ_INIT_VF_RL (1 << 2) 391b5a9ee7cSAriel Elior 392b5a9ee7cSAriel Elior /* defines for pq init */ 393b5a9ee7cSAriel Elior #define PQ_INIT_DEFAULT_WRR_GROUP 1 394b5a9ee7cSAriel Elior #define PQ_INIT_DEFAULT_TC 0 395b5a9ee7cSAriel Elior #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 396b5a9ee7cSAriel Elior 397b5a9ee7cSAriel Elior static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, 398b5a9ee7cSAriel Elior struct qed_qm_info *qm_info, 399b5a9ee7cSAriel Elior u8 tc, u32 pq_init_flags) 400b5a9ee7cSAriel Elior { 401b5a9ee7cSAriel Elior u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn); 402b5a9ee7cSAriel Elior 403b5a9ee7cSAriel Elior if (pq_idx > max_pq) 404b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 405b5a9ee7cSAriel Elior "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); 406b5a9ee7cSAriel Elior 407b5a9ee7cSAriel Elior /* init pq params */ 408b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + 409b5a9ee7cSAriel Elior qm_info->num_vports; 410b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].tc_id = tc; 411b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 412b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].rl_valid = 413b5a9ee7cSAriel Elior (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 414b5a9ee7cSAriel Elior 415b5a9ee7cSAriel Elior /* qm params accounting */ 416b5a9ee7cSAriel Elior qm_info->num_pqs++; 417b5a9ee7cSAriel Elior if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 418b5a9ee7cSAriel Elior qm_info->num_vports++; 419b5a9ee7cSAriel Elior 420b5a9ee7cSAriel Elior if (pq_init_flags & PQ_INIT_PF_RL) 421b5a9ee7cSAriel Elior qm_info->num_pf_rls++; 422b5a9ee7cSAriel Elior 423b5a9ee7cSAriel Elior if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) 424b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 425b5a9ee7cSAriel Elior "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", 426b5a9ee7cSAriel Elior qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); 427b5a9ee7cSAriel Elior 428b5a9ee7cSAriel Elior if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn)) 429b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 430b5a9ee7cSAriel Elior "rl overflow! 
qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", 431b5a9ee7cSAriel Elior qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn)); 432b5a9ee7cSAriel Elior } 433b5a9ee7cSAriel Elior 434b5a9ee7cSAriel Elior /* get pq index according to PQ_FLAGS */ 435b5a9ee7cSAriel Elior static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, 436b5a9ee7cSAriel Elior u32 pq_flags) 437b5a9ee7cSAriel Elior { 438b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 439b5a9ee7cSAriel Elior 440b5a9ee7cSAriel Elior /* Can't have multiple flags set here */ 441b5a9ee7cSAriel Elior if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 442b5a9ee7cSAriel Elior goto err; 443b5a9ee7cSAriel Elior 444b5a9ee7cSAriel Elior switch (pq_flags) { 445b5a9ee7cSAriel Elior case PQ_FLAGS_RLS: 446b5a9ee7cSAriel Elior return &qm_info->first_rl_pq; 447b5a9ee7cSAriel Elior case PQ_FLAGS_MCOS: 448b5a9ee7cSAriel Elior return &qm_info->first_mcos_pq; 449b5a9ee7cSAriel Elior case PQ_FLAGS_LB: 450b5a9ee7cSAriel Elior return &qm_info->pure_lb_pq; 451b5a9ee7cSAriel Elior case PQ_FLAGS_OOO: 452b5a9ee7cSAriel Elior return &qm_info->ooo_pq; 453b5a9ee7cSAriel Elior case PQ_FLAGS_ACK: 454b5a9ee7cSAriel Elior return &qm_info->pure_ack_pq; 455b5a9ee7cSAriel Elior case PQ_FLAGS_OFLD: 456b5a9ee7cSAriel Elior return &qm_info->offload_pq; 457b5a9ee7cSAriel Elior case PQ_FLAGS_LLT: 458b5a9ee7cSAriel Elior return &qm_info->low_latency_pq; 459b5a9ee7cSAriel Elior case PQ_FLAGS_VFS: 460b5a9ee7cSAriel Elior return &qm_info->first_vf_pq; 461b5a9ee7cSAriel Elior default: 462b5a9ee7cSAriel Elior goto err; 463b5a9ee7cSAriel Elior } 464b5a9ee7cSAriel Elior 465b5a9ee7cSAriel Elior err: 466b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 467b5a9ee7cSAriel Elior return NULL; 468b5a9ee7cSAriel Elior } 469b5a9ee7cSAriel Elior 470b5a9ee7cSAriel Elior /* save pq index in qm info */ 471b5a9ee7cSAriel Elior static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn, 472b5a9ee7cSAriel 
Elior u32 pq_flags, u16 pq_val) 473b5a9ee7cSAriel Elior { 474b5a9ee7cSAriel Elior u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 475b5a9ee7cSAriel Elior 476b5a9ee7cSAriel Elior *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 477b5a9ee7cSAriel Elior } 478b5a9ee7cSAriel Elior 479b5a9ee7cSAriel Elior /* get tx pq index, with the PQ TX base already set (ready for context init) */ 480b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags) 481b5a9ee7cSAriel Elior { 482b5a9ee7cSAriel Elior u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 483b5a9ee7cSAriel Elior 484b5a9ee7cSAriel Elior return *base_pq_idx + CM_TX_PQ_BASE; 485b5a9ee7cSAriel Elior } 486b5a9ee7cSAriel Elior 487b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) 488b5a9ee7cSAriel Elior { 489b5a9ee7cSAriel Elior u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); 490b5a9ee7cSAriel Elior 491b5a9ee7cSAriel Elior if (tc > max_tc) 492b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 493b5a9ee7cSAriel Elior 494b5a9ee7cSAriel Elior return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 495b5a9ee7cSAriel Elior } 496b5a9ee7cSAriel Elior 497b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) 498b5a9ee7cSAriel Elior { 499b5a9ee7cSAriel Elior u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); 500b5a9ee7cSAriel Elior 501b5a9ee7cSAriel Elior if (vf > max_vf) 502b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 503b5a9ee7cSAriel Elior 504b5a9ee7cSAriel Elior return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 505b5a9ee7cSAriel Elior } 506b5a9ee7cSAriel Elior 507b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl) 508b5a9ee7cSAriel Elior { 509b5a9ee7cSAriel Elior u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn); 510b5a9ee7cSAriel Elior 511b5a9ee7cSAriel Elior if (rl > max_rl) 512b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "rl %d 
must be smaller than %d\n", rl, max_rl); 513b5a9ee7cSAriel Elior 514b5a9ee7cSAriel Elior return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 515b5a9ee7cSAriel Elior } 516b5a9ee7cSAriel Elior 517b5a9ee7cSAriel Elior /* Functions for creating specific types of pqs */ 518b5a9ee7cSAriel Elior static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) 519b5a9ee7cSAriel Elior { 520b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 521b5a9ee7cSAriel Elior 522b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 523b5a9ee7cSAriel Elior return; 524b5a9ee7cSAriel Elior 525b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 526b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 527b5a9ee7cSAriel Elior } 528b5a9ee7cSAriel Elior 529b5a9ee7cSAriel Elior static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn) 530b5a9ee7cSAriel Elior { 531b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 532b5a9ee7cSAriel Elior 533b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 534b5a9ee7cSAriel Elior return; 535b5a9ee7cSAriel Elior 536b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 537b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 538b5a9ee7cSAriel Elior } 539b5a9ee7cSAriel Elior 540b5a9ee7cSAriel Elior static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) 541b5a9ee7cSAriel Elior { 542b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 543b5a9ee7cSAriel Elior 544b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 545b5a9ee7cSAriel Elior return; 546b5a9ee7cSAriel Elior 547b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 548b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 549b5a9ee7cSAriel Elior } 550b5a9ee7cSAriel Elior 551b5a9ee7cSAriel Elior static void 
qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) 552b5a9ee7cSAriel Elior { 553b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 554b5a9ee7cSAriel Elior 555b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 556b5a9ee7cSAriel Elior return; 557b5a9ee7cSAriel Elior 558b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 559b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 560b5a9ee7cSAriel Elior } 561b5a9ee7cSAriel Elior 562b5a9ee7cSAriel Elior static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) 563b5a9ee7cSAriel Elior { 564b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 565b5a9ee7cSAriel Elior 566b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 567b5a9ee7cSAriel Elior return; 568b5a9ee7cSAriel Elior 569b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 570b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 571b5a9ee7cSAriel Elior } 572b5a9ee7cSAriel Elior 573b5a9ee7cSAriel Elior static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) 574b5a9ee7cSAriel Elior { 575b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 576b5a9ee7cSAriel Elior u8 tc_idx; 577b5a9ee7cSAriel Elior 578b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 579b5a9ee7cSAriel Elior return; 580b5a9ee7cSAriel Elior 581b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 582b5a9ee7cSAriel Elior for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++) 583b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 584b5a9ee7cSAriel Elior } 585b5a9ee7cSAriel Elior 586b5a9ee7cSAriel Elior static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn) 587b5a9ee7cSAriel Elior { 588b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 589b5a9ee7cSAriel Elior u16 vf_idx, 
num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 590b5a9ee7cSAriel Elior 591b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 592b5a9ee7cSAriel Elior return; 593b5a9ee7cSAriel Elior 594b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 5951408cc1fSYuval Mintz qm_info->num_vf_pqs = num_vfs; 596b5a9ee7cSAriel Elior for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 597b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, 598b5a9ee7cSAriel Elior qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 599b5a9ee7cSAriel Elior } 600fe56b9e6SYuval Mintz 601b5a9ee7cSAriel Elior static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn) 602b5a9ee7cSAriel Elior { 603b5a9ee7cSAriel Elior u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn); 604b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 605a64b02d5SManish Chopra 606b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 607b5a9ee7cSAriel Elior return; 608b5a9ee7cSAriel Elior 609b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 610b5a9ee7cSAriel Elior for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 611b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 612b5a9ee7cSAriel Elior } 613b5a9ee7cSAriel Elior 614b5a9ee7cSAriel Elior static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) 615b5a9ee7cSAriel Elior { 616b5a9ee7cSAriel Elior /* rate limited pqs, must come first (FW assumption) */ 617b5a9ee7cSAriel Elior qed_init_qm_rl_pqs(p_hwfn); 618b5a9ee7cSAriel Elior 619b5a9ee7cSAriel Elior /* pqs for multi cos */ 620b5a9ee7cSAriel Elior qed_init_qm_mcos_pqs(p_hwfn); 621b5a9ee7cSAriel Elior 622b5a9ee7cSAriel Elior /* pure loopback pq */ 623b5a9ee7cSAriel Elior qed_init_qm_lb_pq(p_hwfn); 624b5a9ee7cSAriel Elior 625b5a9ee7cSAriel Elior /* out of order pq */ 626b5a9ee7cSAriel Elior qed_init_qm_ooo_pq(p_hwfn); 627b5a9ee7cSAriel Elior 628b5a9ee7cSAriel Elior /* pure ack pq */ 629b5a9ee7cSAriel Elior 
qed_init_qm_pure_ack_pq(p_hwfn); 630b5a9ee7cSAriel Elior 631b5a9ee7cSAriel Elior /* pq for offloaded protocol */ 632b5a9ee7cSAriel Elior qed_init_qm_offload_pq(p_hwfn); 633b5a9ee7cSAriel Elior 634b5a9ee7cSAriel Elior /* low latency pq */ 635b5a9ee7cSAriel Elior qed_init_qm_low_latency_pq(p_hwfn); 636b5a9ee7cSAriel Elior 637b5a9ee7cSAriel Elior /* done sharing vports */ 638b5a9ee7cSAriel Elior qed_init_qm_advance_vport(p_hwfn); 639b5a9ee7cSAriel Elior 640b5a9ee7cSAriel Elior /* pqs for vfs */ 641b5a9ee7cSAriel Elior qed_init_qm_vf_pqs(p_hwfn); 642b5a9ee7cSAriel Elior } 643b5a9ee7cSAriel Elior 644b5a9ee7cSAriel Elior /* compare values of getters against resources amounts */ 645b5a9ee7cSAriel Elior static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) 646b5a9ee7cSAriel Elior { 647b5a9ee7cSAriel Elior if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) { 648b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 649b5a9ee7cSAriel Elior return -EINVAL; 650b5a9ee7cSAriel Elior } 651b5a9ee7cSAriel Elior 652b5a9ee7cSAriel Elior if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) { 653b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 654b5a9ee7cSAriel Elior return -EINVAL; 655b5a9ee7cSAriel Elior } 656fe56b9e6SYuval Mintz 657fe56b9e6SYuval Mintz return 0; 658b5a9ee7cSAriel Elior } 659fe56b9e6SYuval Mintz 660b5a9ee7cSAriel Elior static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) 661b5a9ee7cSAriel Elior { 662b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 663b5a9ee7cSAriel Elior struct init_qm_vport_params *vport; 664b5a9ee7cSAriel Elior struct init_qm_port_params *port; 665b5a9ee7cSAriel Elior struct init_qm_pq_params *pq; 666b5a9ee7cSAriel Elior int i, tc; 667b5a9ee7cSAriel Elior 668b5a9ee7cSAriel Elior /* top level params */ 669b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 670b5a9ee7cSAriel Elior NETIF_MSG_HW, 671b5a9ee7cSAriel Elior "qm init top level 
params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 672b5a9ee7cSAriel Elior qm_info->start_pq, 673b5a9ee7cSAriel Elior qm_info->start_vport, 674b5a9ee7cSAriel Elior qm_info->pure_lb_pq, 675b5a9ee7cSAriel Elior qm_info->offload_pq, qm_info->pure_ack_pq); 676b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 677b5a9ee7cSAriel Elior NETIF_MSG_HW, 678b5a9ee7cSAriel Elior "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 679b5a9ee7cSAriel Elior qm_info->ooo_pq, 680b5a9ee7cSAriel Elior qm_info->first_vf_pq, 681b5a9ee7cSAriel Elior qm_info->num_pqs, 682b5a9ee7cSAriel Elior qm_info->num_vf_pqs, 683b5a9ee7cSAriel Elior qm_info->num_vports, qm_info->max_phys_tcs_per_port); 684b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 685b5a9ee7cSAriel Elior NETIF_MSG_HW, 686b5a9ee7cSAriel Elior "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 687b5a9ee7cSAriel Elior qm_info->pf_rl_en, 688b5a9ee7cSAriel Elior qm_info->pf_wfq_en, 689b5a9ee7cSAriel Elior qm_info->vport_rl_en, 690b5a9ee7cSAriel Elior qm_info->vport_wfq_en, 691b5a9ee7cSAriel Elior qm_info->pf_wfq, 692b5a9ee7cSAriel Elior qm_info->pf_rl, 693b5a9ee7cSAriel Elior qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); 694b5a9ee7cSAriel Elior 695b5a9ee7cSAriel Elior /* port table */ 696b5a9ee7cSAriel Elior for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) { 697b5a9ee7cSAriel Elior port = &(qm_info->qm_port_params[i]); 698b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 699b5a9ee7cSAriel Elior NETIF_MSG_HW, 700b5a9ee7cSAriel Elior "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 701b5a9ee7cSAriel Elior i, 702b5a9ee7cSAriel Elior port->active, 703b5a9ee7cSAriel Elior port->active_phys_tcs, 704b5a9ee7cSAriel Elior port->num_pbf_cmd_lines, 705b5a9ee7cSAriel Elior port->num_btb_blocks, port->reserved); 706b5a9ee7cSAriel Elior } 707b5a9ee7cSAriel Elior 
708b5a9ee7cSAriel Elior /* vport table */ 709b5a9ee7cSAriel Elior for (i = 0; i < qm_info->num_vports; i++) { 710b5a9ee7cSAriel Elior vport = &(qm_info->qm_vport_params[i]); 711b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 712b5a9ee7cSAriel Elior NETIF_MSG_HW, 713b5a9ee7cSAriel Elior "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 714b5a9ee7cSAriel Elior qm_info->start_vport + i, 715b5a9ee7cSAriel Elior vport->vport_rl, vport->vport_wfq); 716b5a9ee7cSAriel Elior for (tc = 0; tc < NUM_OF_TCS; tc++) 717b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 718b5a9ee7cSAriel Elior NETIF_MSG_HW, 719b5a9ee7cSAriel Elior "%d ", vport->first_tx_pq_id[tc]); 720b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); 721b5a9ee7cSAriel Elior } 722b5a9ee7cSAriel Elior 723b5a9ee7cSAriel Elior /* pq table */ 724b5a9ee7cSAriel Elior for (i = 0; i < qm_info->num_pqs; i++) { 725b5a9ee7cSAriel Elior pq = &(qm_info->qm_pq_params[i]); 726b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 727b5a9ee7cSAriel Elior NETIF_MSG_HW, 728b5a9ee7cSAriel Elior "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 729b5a9ee7cSAriel Elior qm_info->start_pq + i, 730b5a9ee7cSAriel Elior pq->vport_id, 731b5a9ee7cSAriel Elior pq->tc_id, pq->wrr_group, pq->rl_valid); 732b5a9ee7cSAriel Elior } 733b5a9ee7cSAriel Elior } 734b5a9ee7cSAriel Elior 735b5a9ee7cSAriel Elior static void qed_init_qm_info(struct qed_hwfn *p_hwfn) 736b5a9ee7cSAriel Elior { 737b5a9ee7cSAriel Elior /* reset params required for init run */ 738b5a9ee7cSAriel Elior qed_init_qm_reset_params(p_hwfn); 739b5a9ee7cSAriel Elior 740b5a9ee7cSAriel Elior /* init QM top level params */ 741b5a9ee7cSAriel Elior qed_init_qm_params(p_hwfn); 742b5a9ee7cSAriel Elior 743b5a9ee7cSAriel Elior /* init QM port params */ 744b5a9ee7cSAriel Elior qed_init_qm_port_params(p_hwfn); 745b5a9ee7cSAriel Elior 746b5a9ee7cSAriel Elior /* init QM vport params */ 747b5a9ee7cSAriel Elior qed_init_qm_vport_params(p_hwfn); 748b5a9ee7cSAriel Elior 749b5a9ee7cSAriel Elior /* init QM 
physical queue params */ 750b5a9ee7cSAriel Elior qed_init_qm_pq_params(p_hwfn); 751b5a9ee7cSAriel Elior 752b5a9ee7cSAriel Elior /* display all that init */ 753b5a9ee7cSAriel Elior qed_dp_init_qm_params(p_hwfn); 754fe56b9e6SYuval Mintz } 755fe56b9e6SYuval Mintz 75639651abdSSudarsana Reddy Kalluru /* This function reconfigures the QM pf on the fly. 75739651abdSSudarsana Reddy Kalluru * For this purpose we: 75839651abdSSudarsana Reddy Kalluru * 1. reconfigure the QM database 75939651abdSSudarsana Reddy Kalluru * 2. set new values to runtime arrat 76039651abdSSudarsana Reddy Kalluru * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 76139651abdSSudarsana Reddy Kalluru * 4. activate init tool in QM_PF stage 76239651abdSSudarsana Reddy Kalluru * 5. send an sdm_qm_cmd through rbc interface to release the QM 76339651abdSSudarsana Reddy Kalluru */ 76439651abdSSudarsana Reddy Kalluru int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 76539651abdSSudarsana Reddy Kalluru { 76639651abdSSudarsana Reddy Kalluru struct qed_qm_info *qm_info = &p_hwfn->qm_info; 76739651abdSSudarsana Reddy Kalluru bool b_rc; 76839651abdSSudarsana Reddy Kalluru int rc; 76939651abdSSudarsana Reddy Kalluru 77039651abdSSudarsana Reddy Kalluru /* initialize qed's qm data structure */ 771b5a9ee7cSAriel Elior qed_init_qm_info(p_hwfn); 77239651abdSSudarsana Reddy Kalluru 77339651abdSSudarsana Reddy Kalluru /* stop PF's qm queues */ 77439651abdSSudarsana Reddy Kalluru spin_lock_bh(&qm_lock); 77539651abdSSudarsana Reddy Kalluru b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 77639651abdSSudarsana Reddy Kalluru qm_info->start_pq, qm_info->num_pqs); 77739651abdSSudarsana Reddy Kalluru spin_unlock_bh(&qm_lock); 77839651abdSSudarsana Reddy Kalluru if (!b_rc) 77939651abdSSudarsana Reddy Kalluru return -EINVAL; 78039651abdSSudarsana Reddy Kalluru 78139651abdSSudarsana Reddy Kalluru /* clear the QM_PF runtime phase leftovers from previous init */ 78239651abdSSudarsana Reddy 
Kalluru qed_init_clear_rt_data(p_hwfn); 78339651abdSSudarsana Reddy Kalluru 78439651abdSSudarsana Reddy Kalluru /* prepare QM portion of runtime array */ 78515582962SRahul Verma qed_qm_init_pf(p_hwfn, p_ptt); 78639651abdSSudarsana Reddy Kalluru 78739651abdSSudarsana Reddy Kalluru /* activate init tool on runtime array */ 78839651abdSSudarsana Reddy Kalluru rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 78939651abdSSudarsana Reddy Kalluru p_hwfn->hw_info.hw_mode); 79039651abdSSudarsana Reddy Kalluru if (rc) 79139651abdSSudarsana Reddy Kalluru return rc; 79239651abdSSudarsana Reddy Kalluru 79339651abdSSudarsana Reddy Kalluru /* start PF's qm queues */ 79439651abdSSudarsana Reddy Kalluru spin_lock_bh(&qm_lock); 79539651abdSSudarsana Reddy Kalluru b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 79639651abdSSudarsana Reddy Kalluru qm_info->start_pq, qm_info->num_pqs); 79739651abdSSudarsana Reddy Kalluru spin_unlock_bh(&qm_lock); 79839651abdSSudarsana Reddy Kalluru if (!b_rc) 79939651abdSSudarsana Reddy Kalluru return -EINVAL; 80039651abdSSudarsana Reddy Kalluru 80139651abdSSudarsana Reddy Kalluru return 0; 80239651abdSSudarsana Reddy Kalluru } 80339651abdSSudarsana Reddy Kalluru 804b5a9ee7cSAriel Elior static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) 805b5a9ee7cSAriel Elior { 806b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 807b5a9ee7cSAriel Elior int rc; 808b5a9ee7cSAriel Elior 809b5a9ee7cSAriel Elior rc = qed_init_qm_sanity(p_hwfn); 810b5a9ee7cSAriel Elior if (rc) 811b5a9ee7cSAriel Elior goto alloc_err; 812b5a9ee7cSAriel Elior 813b5a9ee7cSAriel Elior qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * 814b5a9ee7cSAriel Elior qed_init_qm_get_num_pqs(p_hwfn), 815b5a9ee7cSAriel Elior GFP_KERNEL); 816b5a9ee7cSAriel Elior if (!qm_info->qm_pq_params) 817b5a9ee7cSAriel Elior goto alloc_err; 818b5a9ee7cSAriel Elior 819b5a9ee7cSAriel Elior qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * 
820b5a9ee7cSAriel Elior qed_init_qm_get_num_vports(p_hwfn), 821b5a9ee7cSAriel Elior GFP_KERNEL); 822b5a9ee7cSAriel Elior if (!qm_info->qm_vport_params) 823b5a9ee7cSAriel Elior goto alloc_err; 824b5a9ee7cSAriel Elior 8252f7878c0SWei Yongjun qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * 826b5a9ee7cSAriel Elior p_hwfn->cdev->num_ports_in_engines, 827b5a9ee7cSAriel Elior GFP_KERNEL); 828b5a9ee7cSAriel Elior if (!qm_info->qm_port_params) 829b5a9ee7cSAriel Elior goto alloc_err; 830b5a9ee7cSAriel Elior 831b5a9ee7cSAriel Elior qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) * 832b5a9ee7cSAriel Elior qed_init_qm_get_num_vports(p_hwfn), 833b5a9ee7cSAriel Elior GFP_KERNEL); 834b5a9ee7cSAriel Elior if (!qm_info->wfq_data) 835b5a9ee7cSAriel Elior goto alloc_err; 836b5a9ee7cSAriel Elior 837b5a9ee7cSAriel Elior return 0; 838b5a9ee7cSAriel Elior 839b5a9ee7cSAriel Elior alloc_err: 840b5a9ee7cSAriel Elior DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); 841b5a9ee7cSAriel Elior qed_qm_info_free(p_hwfn); 842b5a9ee7cSAriel Elior return -ENOMEM; 843b5a9ee7cSAriel Elior } 844b5a9ee7cSAriel Elior 845fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev) 846fe56b9e6SYuval Mintz { 847f9dc4d1fSRam Amrani u32 rdma_tasks, excess_tasks; 848f9dc4d1fSRam Amrani u32 line_count; 849fe56b9e6SYuval Mintz int i, rc = 0; 850fe56b9e6SYuval Mintz 8511408cc1fSYuval Mintz if (IS_VF(cdev)) 8521408cc1fSYuval Mintz return rc; 8531408cc1fSYuval Mintz 854fe56b9e6SYuval Mintz cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); 855fe56b9e6SYuval Mintz if (!cdev->fw_data) 856fe56b9e6SYuval Mintz return -ENOMEM; 857fe56b9e6SYuval Mintz 858fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 859fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 860dbb799c3SYuval Mintz u32 n_eqes, num_cons; 861fe56b9e6SYuval Mintz 862fe56b9e6SYuval Mintz /* First allocate the context manager structure */ 863fe56b9e6SYuval Mintz rc = qed_cxt_mngr_alloc(p_hwfn); 
864fe56b9e6SYuval Mintz if (rc) 865fe56b9e6SYuval Mintz goto alloc_err; 866fe56b9e6SYuval Mintz 867fe56b9e6SYuval Mintz /* Set the HW cid/tid numbers (in the contest manager) 868fe56b9e6SYuval Mintz * Must be done prior to any further computations. 869fe56b9e6SYuval Mintz */ 870f9dc4d1fSRam Amrani rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 871fe56b9e6SYuval Mintz if (rc) 872fe56b9e6SYuval Mintz goto alloc_err; 873fe56b9e6SYuval Mintz 874b5a9ee7cSAriel Elior rc = qed_alloc_qm_data(p_hwfn); 875fe56b9e6SYuval Mintz if (rc) 876fe56b9e6SYuval Mintz goto alloc_err; 877fe56b9e6SYuval Mintz 878b5a9ee7cSAriel Elior /* init qm info */ 879b5a9ee7cSAriel Elior qed_init_qm_info(p_hwfn); 880b5a9ee7cSAriel Elior 881fe56b9e6SYuval Mintz /* Compute the ILT client partition */ 882f9dc4d1fSRam Amrani rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 883f9dc4d1fSRam Amrani if (rc) { 884f9dc4d1fSRam Amrani DP_NOTICE(p_hwfn, 885f9dc4d1fSRam Amrani "too many ILT lines; re-computing with less lines\n"); 886f9dc4d1fSRam Amrani /* In case there are not enough ILT lines we reduce the 887f9dc4d1fSRam Amrani * number of RDMA tasks and re-compute. 888f9dc4d1fSRam Amrani */ 889f9dc4d1fSRam Amrani excess_tasks = 890f9dc4d1fSRam Amrani qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count); 891f9dc4d1fSRam Amrani if (!excess_tasks) 892f9dc4d1fSRam Amrani goto alloc_err; 893f9dc4d1fSRam Amrani 894f9dc4d1fSRam Amrani rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 895f9dc4d1fSRam Amrani rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks); 896fe56b9e6SYuval Mintz if (rc) 897fe56b9e6SYuval Mintz goto alloc_err; 898fe56b9e6SYuval Mintz 899f9dc4d1fSRam Amrani rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 900f9dc4d1fSRam Amrani if (rc) { 901f9dc4d1fSRam Amrani DP_ERR(p_hwfn, 902f9dc4d1fSRam Amrani "failed ILT compute. 
Requested too many lines: %u\n", 903f9dc4d1fSRam Amrani line_count); 904f9dc4d1fSRam Amrani 905f9dc4d1fSRam Amrani goto alloc_err; 906f9dc4d1fSRam Amrani } 907f9dc4d1fSRam Amrani } 908f9dc4d1fSRam Amrani 909fe56b9e6SYuval Mintz /* CID map / ILT shadow table / T2 910fe56b9e6SYuval Mintz * The talbes sizes are determined by the computations above 911fe56b9e6SYuval Mintz */ 912fe56b9e6SYuval Mintz rc = qed_cxt_tables_alloc(p_hwfn); 913fe56b9e6SYuval Mintz if (rc) 914fe56b9e6SYuval Mintz goto alloc_err; 915fe56b9e6SYuval Mintz 916fe56b9e6SYuval Mintz /* SPQ, must follow ILT because initializes SPQ context */ 917fe56b9e6SYuval Mintz rc = qed_spq_alloc(p_hwfn); 918fe56b9e6SYuval Mintz if (rc) 919fe56b9e6SYuval Mintz goto alloc_err; 920fe56b9e6SYuval Mintz 921fe56b9e6SYuval Mintz /* SP status block allocation */ 922fe56b9e6SYuval Mintz p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, 923fe56b9e6SYuval Mintz RESERVED_PTT_DPC); 924fe56b9e6SYuval Mintz 925fe56b9e6SYuval Mintz rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 926fe56b9e6SYuval Mintz if (rc) 927fe56b9e6SYuval Mintz goto alloc_err; 928fe56b9e6SYuval Mintz 92932a47e72SYuval Mintz rc = qed_iov_alloc(p_hwfn); 93032a47e72SYuval Mintz if (rc) 93132a47e72SYuval Mintz goto alloc_err; 93232a47e72SYuval Mintz 933fe56b9e6SYuval Mintz /* EQ */ 934dbb799c3SYuval Mintz n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); 935dbb799c3SYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 936dbb799c3SYuval Mintz num_cons = qed_cxt_get_proto_cid_count(p_hwfn, 937dbb799c3SYuval Mintz PROTOCOLID_ROCE, 9388c93beafSYuval Mintz NULL) * 2; 939dbb799c3SYuval Mintz n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 940dbb799c3SYuval Mintz } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 941dbb799c3SYuval Mintz num_cons = 942dbb799c3SYuval Mintz qed_cxt_get_proto_cid_count(p_hwfn, 9438c93beafSYuval Mintz PROTOCOLID_ISCSI, 9448c93beafSYuval Mintz NULL); 945dbb799c3SYuval Mintz n_eqes += 2 * num_cons; 946dbb799c3SYuval Mintz 
} 947dbb799c3SYuval Mintz 948dbb799c3SYuval Mintz if (n_eqes > 0xFFFF) { 949dbb799c3SYuval Mintz DP_ERR(p_hwfn, 950dbb799c3SYuval Mintz "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", 951dbb799c3SYuval Mintz n_eqes, 0xFFFF); 9523587cb87STomer Tayar goto alloc_no_mem; 9539b15acbfSDan Carpenter } 954dbb799c3SYuval Mintz 9553587cb87STomer Tayar rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); 9563587cb87STomer Tayar if (rc) 9573587cb87STomer Tayar goto alloc_err; 958fe56b9e6SYuval Mintz 9593587cb87STomer Tayar rc = qed_consq_alloc(p_hwfn); 9603587cb87STomer Tayar if (rc) 9613587cb87STomer Tayar goto alloc_err; 962fe56b9e6SYuval Mintz 9630a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 9640a7fb11cSYuval Mintz if (p_hwfn->using_ll2) { 9653587cb87STomer Tayar rc = qed_ll2_alloc(p_hwfn); 9663587cb87STomer Tayar if (rc) 9673587cb87STomer Tayar goto alloc_err; 9680a7fb11cSYuval Mintz } 9690a7fb11cSYuval Mintz #endif 9701e128c81SArun Easi 9711e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 9723587cb87STomer Tayar rc = qed_fcoe_alloc(p_hwfn); 9733587cb87STomer Tayar if (rc) 9743587cb87STomer Tayar goto alloc_err; 9751e128c81SArun Easi } 9761e128c81SArun Easi 977fc831825SYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 9783587cb87STomer Tayar rc = qed_iscsi_alloc(p_hwfn); 9793587cb87STomer Tayar if (rc) 9803587cb87STomer Tayar goto alloc_err; 9813587cb87STomer Tayar rc = qed_ooo_alloc(p_hwfn); 9823587cb87STomer Tayar if (rc) 9833587cb87STomer Tayar goto alloc_err; 984fc831825SYuval Mintz } 9850a7fb11cSYuval Mintz 986fe56b9e6SYuval Mintz /* DMA info initialization */ 987fe56b9e6SYuval Mintz rc = qed_dmae_info_alloc(p_hwfn); 9882591c280SJoe Perches if (rc) 989fe56b9e6SYuval Mintz goto alloc_err; 99039651abdSSudarsana Reddy Kalluru 99139651abdSSudarsana Reddy Kalluru /* DCBX initialization */ 99239651abdSSudarsana Reddy Kalluru rc = qed_dcbx_info_alloc(p_hwfn); 9932591c280SJoe Perches if (rc) 99439651abdSSudarsana Reddy Kalluru goto 
alloc_err; 99539651abdSSudarsana Reddy Kalluru } 996fe56b9e6SYuval Mintz 997fe56b9e6SYuval Mintz cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); 9982591c280SJoe Perches if (!cdev->reset_stats) 99983aeb933SYuval Mintz goto alloc_no_mem; 1000fe56b9e6SYuval Mintz 1001fe56b9e6SYuval Mintz return 0; 1002fe56b9e6SYuval Mintz 1003dbb799c3SYuval Mintz alloc_no_mem: 1004dbb799c3SYuval Mintz rc = -ENOMEM; 1005fe56b9e6SYuval Mintz alloc_err: 1006fe56b9e6SYuval Mintz qed_resc_free(cdev); 1007fe56b9e6SYuval Mintz return rc; 1008fe56b9e6SYuval Mintz } 1009fe56b9e6SYuval Mintz 1010fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev) 1011fe56b9e6SYuval Mintz { 1012fe56b9e6SYuval Mintz int i; 1013fe56b9e6SYuval Mintz 10141408cc1fSYuval Mintz if (IS_VF(cdev)) 10151408cc1fSYuval Mintz return; 10161408cc1fSYuval Mintz 1017fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 1018fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1019fe56b9e6SYuval Mintz 1020fe56b9e6SYuval Mintz qed_cxt_mngr_setup(p_hwfn); 1021fe56b9e6SYuval Mintz qed_spq_setup(p_hwfn); 10223587cb87STomer Tayar qed_eq_setup(p_hwfn); 10233587cb87STomer Tayar qed_consq_setup(p_hwfn); 1024fe56b9e6SYuval Mintz 1025fe56b9e6SYuval Mintz /* Read shadow of current MFW mailbox */ 1026fe56b9e6SYuval Mintz qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1027fe56b9e6SYuval Mintz memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 1028fe56b9e6SYuval Mintz p_hwfn->mcp_info->mfw_mb_cur, 1029fe56b9e6SYuval Mintz p_hwfn->mcp_info->mfw_mb_length); 1030fe56b9e6SYuval Mintz 1031fe56b9e6SYuval Mintz qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); 103232a47e72SYuval Mintz 103332a47e72SYuval Mintz qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 10340a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 10350a7fb11cSYuval Mintz if (p_hwfn->using_ll2) 10363587cb87STomer Tayar qed_ll2_setup(p_hwfn); 10370a7fb11cSYuval Mintz #endif 10381e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 10393587cb87STomer Tayar 
qed_fcoe_setup(p_hwfn); 10401e128c81SArun Easi 10411d6cff4fSYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 10423587cb87STomer Tayar qed_iscsi_setup(p_hwfn); 10433587cb87STomer Tayar qed_ooo_setup(p_hwfn); 10441d6cff4fSYuval Mintz } 1045fe56b9e6SYuval Mintz } 1046fe56b9e6SYuval Mintz } 1047fe56b9e6SYuval Mintz 1048fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT (100) 1049fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME (10) 1050fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn, 10510b55e27dSYuval Mintz struct qed_ptt *p_ptt, u16 id, bool is_vf) 1052fe56b9e6SYuval Mintz { 1053fe56b9e6SYuval Mintz u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1054fe56b9e6SYuval Mintz int rc = -EBUSY; 1055fe56b9e6SYuval Mintz 1056fc48b7a6SYuval Mintz addr = GTT_BAR0_MAP_REG_USDM_RAM + 1057fc48b7a6SYuval Mintz USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1058fe56b9e6SYuval Mintz 10590b55e27dSYuval Mintz if (is_vf) 10600b55e27dSYuval Mintz id += 0x10; 10610b55e27dSYuval Mintz 1062fc48b7a6SYuval Mintz command |= X_FINAL_CLEANUP_AGG_INT << 1063fc48b7a6SYuval Mintz SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1064fc48b7a6SYuval Mintz command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1065fc48b7a6SYuval Mintz command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1066fc48b7a6SYuval Mintz command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1067fe56b9e6SYuval Mintz 1068fe56b9e6SYuval Mintz /* Make sure notification is not set before initiating final cleanup */ 1069fe56b9e6SYuval Mintz if (REG_RD(p_hwfn, addr)) { 10701a635e48SYuval Mintz DP_NOTICE(p_hwfn, 1071fe56b9e6SYuval Mintz "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1072fe56b9e6SYuval Mintz REG_WR(p_hwfn, addr, 0); 1073fe56b9e6SYuval Mintz } 1074fe56b9e6SYuval Mintz 1075fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1076fe56b9e6SYuval Mintz "Sending final cleanup for PFVF[%d] [Command %08x\n]", 
1077fe56b9e6SYuval Mintz id, command); 1078fe56b9e6SYuval Mintz 1079fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1080fe56b9e6SYuval Mintz 1081fe56b9e6SYuval Mintz /* Poll until completion */ 1082fe56b9e6SYuval Mintz while (!REG_RD(p_hwfn, addr) && count--) 1083fe56b9e6SYuval Mintz msleep(FINAL_CLEANUP_POLL_TIME); 1084fe56b9e6SYuval Mintz 1085fe56b9e6SYuval Mintz if (REG_RD(p_hwfn, addr)) 1086fe56b9e6SYuval Mintz rc = 0; 1087fe56b9e6SYuval Mintz else 1088fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, 1089fe56b9e6SYuval Mintz "Failed to receive FW final cleanup notification\n"); 1090fe56b9e6SYuval Mintz 1091fe56b9e6SYuval Mintz /* Cleanup afterwards */ 1092fe56b9e6SYuval Mintz REG_WR(p_hwfn, addr, 0); 1093fe56b9e6SYuval Mintz 1094fe56b9e6SYuval Mintz return rc; 1095fe56b9e6SYuval Mintz } 1096fe56b9e6SYuval Mintz 10979c79ddaaSMintz, Yuval static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) 1098fe56b9e6SYuval Mintz { 1099fe56b9e6SYuval Mintz int hw_mode = 0; 1100fe56b9e6SYuval Mintz 11019c79ddaaSMintz, Yuval if (QED_IS_BB_B0(p_hwfn->cdev)) { 11029c79ddaaSMintz, Yuval hw_mode |= 1 << MODE_BB; 11039c79ddaaSMintz, Yuval } else if (QED_IS_AH(p_hwfn->cdev)) { 11049c79ddaaSMintz, Yuval hw_mode |= 1 << MODE_K2; 11059c79ddaaSMintz, Yuval } else { 11069c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", 11079c79ddaaSMintz, Yuval p_hwfn->cdev->type); 11089c79ddaaSMintz, Yuval return -EINVAL; 11099c79ddaaSMintz, Yuval } 1110fe56b9e6SYuval Mintz 1111fe56b9e6SYuval Mintz switch (p_hwfn->cdev->num_ports_in_engines) { 1112fe56b9e6SYuval Mintz case 1: 1113fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1114fe56b9e6SYuval Mintz break; 1115fe56b9e6SYuval Mintz case 2: 1116fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1117fe56b9e6SYuval Mintz break; 1118fe56b9e6SYuval Mintz case 4: 1119fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1120fe56b9e6SYuval Mintz break; 1121fe56b9e6SYuval Mintz default: 
1122fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", 1123fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines); 11249c79ddaaSMintz, Yuval return -EINVAL; 1125fe56b9e6SYuval Mintz } 1126fe56b9e6SYuval Mintz 1127fe56b9e6SYuval Mintz switch (p_hwfn->cdev->mf_mode) { 1128fc48b7a6SYuval Mintz case QED_MF_DEFAULT: 1129fc48b7a6SYuval Mintz case QED_MF_NPAR: 1130fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_MF_SI; 1131fe56b9e6SYuval Mintz break; 1132fc48b7a6SYuval Mintz case QED_MF_OVLAN: 1133fc48b7a6SYuval Mintz hw_mode |= 1 << MODE_MF_SD; 1134fc48b7a6SYuval Mintz break; 1135fe56b9e6SYuval Mintz default: 1136fc48b7a6SYuval Mintz DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); 1137fc48b7a6SYuval Mintz hw_mode |= 1 << MODE_MF_SI; 1138fe56b9e6SYuval Mintz } 1139fe56b9e6SYuval Mintz 1140fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_ASIC; 1141fe56b9e6SYuval Mintz 11421af9dcf7SYuval Mintz if (p_hwfn->cdev->num_hwfns > 1) 11431af9dcf7SYuval Mintz hw_mode |= 1 << MODE_100G; 11441af9dcf7SYuval Mintz 1145fe56b9e6SYuval Mintz p_hwfn->hw_info.hw_mode = hw_mode; 11461af9dcf7SYuval Mintz 11471af9dcf7SYuval Mintz DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), 11481af9dcf7SYuval Mintz "Configuring function for hw_mode: 0x%08x\n", 11491af9dcf7SYuval Mintz p_hwfn->hw_info.hw_mode); 11509c79ddaaSMintz, Yuval 11519c79ddaaSMintz, Yuval return 0; 1152fe56b9e6SYuval Mintz } 1153fe56b9e6SYuval Mintz 1154fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. 
*/ 1155fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev) 1156fe56b9e6SYuval Mintz { 1157fe56b9e6SYuval Mintz u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1158fe56b9e6SYuval Mintz int i, sb_id; 1159fe56b9e6SYuval Mintz 1160fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 1161fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1162fe56b9e6SYuval Mintz struct qed_igu_info *p_igu_info; 1163fe56b9e6SYuval Mintz struct qed_igu_block *p_block; 1164fe56b9e6SYuval Mintz struct cau_sb_entry sb_entry; 1165fe56b9e6SYuval Mintz 1166fe56b9e6SYuval Mintz p_igu_info = p_hwfn->hw_info.p_igu_info; 1167fe56b9e6SYuval Mintz 1168fe56b9e6SYuval Mintz for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); 1169fe56b9e6SYuval Mintz sb_id++) { 1170fe56b9e6SYuval Mintz p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; 1171fe56b9e6SYuval Mintz if (!p_block->is_pf) 1172fe56b9e6SYuval Mintz continue; 1173fe56b9e6SYuval Mintz 1174fe56b9e6SYuval Mintz qed_init_cau_sb_entry(p_hwfn, &sb_entry, 11751a635e48SYuval Mintz p_block->function_id, 0, 0); 11761a635e48SYuval Mintz STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); 1177fe56b9e6SYuval Mintz } 1178fe56b9e6SYuval Mintz } 1179fe56b9e6SYuval Mintz } 1180fe56b9e6SYuval Mintz 118160afed72STomer Tayar static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, 118260afed72STomer Tayar struct qed_ptt *p_ptt) 118360afed72STomer Tayar { 118460afed72STomer Tayar u32 val, wr_mbs, cache_line_size; 118560afed72STomer Tayar 118660afed72STomer Tayar val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 118760afed72STomer Tayar switch (val) { 118860afed72STomer Tayar case 0: 118960afed72STomer Tayar wr_mbs = 128; 119060afed72STomer Tayar break; 119160afed72STomer Tayar case 1: 119260afed72STomer Tayar wr_mbs = 256; 119360afed72STomer Tayar break; 119460afed72STomer Tayar case 2: 119560afed72STomer Tayar wr_mbs = 512; 119660afed72STomer Tayar break; 119760afed72STomer Tayar default: 119860afed72STomer Tayar DP_INFO(p_hwfn, 
119960afed72STomer Tayar "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 120060afed72STomer Tayar val); 120160afed72STomer Tayar return; 120260afed72STomer Tayar } 120360afed72STomer Tayar 120460afed72STomer Tayar cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); 120560afed72STomer Tayar switch (cache_line_size) { 120660afed72STomer Tayar case 32: 120760afed72STomer Tayar val = 0; 120860afed72STomer Tayar break; 120960afed72STomer Tayar case 64: 121060afed72STomer Tayar val = 1; 121160afed72STomer Tayar break; 121260afed72STomer Tayar case 128: 121360afed72STomer Tayar val = 2; 121460afed72STomer Tayar break; 121560afed72STomer Tayar case 256: 121660afed72STomer Tayar val = 3; 121760afed72STomer Tayar break; 121860afed72STomer Tayar default: 121960afed72STomer Tayar DP_INFO(p_hwfn, 122060afed72STomer Tayar "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 122160afed72STomer Tayar cache_line_size); 122260afed72STomer Tayar } 122360afed72STomer Tayar 122460afed72STomer Tayar if (L1_CACHE_BYTES > wr_mbs) 122560afed72STomer Tayar DP_INFO(p_hwfn, 122660afed72STomer Tayar "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 122760afed72STomer Tayar L1_CACHE_BYTES, wr_mbs); 122860afed72STomer Tayar 122960afed72STomer Tayar STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 123060afed72STomer Tayar } 123160afed72STomer Tayar 1232fe56b9e6SYuval Mintz static int qed_hw_init_common(struct qed_hwfn *p_hwfn, 12331a635e48SYuval Mintz struct qed_ptt *p_ptt, int hw_mode) 1234fe56b9e6SYuval Mintz { 1235fe56b9e6SYuval Mintz struct qed_qm_info *qm_info = &p_hwfn->qm_info; 1236fe56b9e6SYuval Mintz struct qed_qm_common_rt_init_params params; 1237fe56b9e6SYuval Mintz struct qed_dev *cdev = p_hwfn->cdev; 12389c79ddaaSMintz, Yuval u8 vf_id, max_num_vfs; 1239dbb799c3SYuval Mintz u16 num_pfs, pf_id; 12401408cc1fSYuval Mintz 
u32 concrete_fid; 1241fe56b9e6SYuval Mintz int rc = 0; 1242fe56b9e6SYuval Mintz 1243fe56b9e6SYuval Mintz qed_init_cau_rt_data(cdev); 1244fe56b9e6SYuval Mintz 1245fe56b9e6SYuval Mintz /* Program GTT windows */ 1246fe56b9e6SYuval Mintz qed_gtt_init(p_hwfn); 1247fe56b9e6SYuval Mintz 1248fe56b9e6SYuval Mintz if (p_hwfn->mcp_info) { 1249fe56b9e6SYuval Mintz if (p_hwfn->mcp_info->func_info.bandwidth_max) 1250fe56b9e6SYuval Mintz qm_info->pf_rl_en = 1; 1251fe56b9e6SYuval Mintz if (p_hwfn->mcp_info->func_info.bandwidth_min) 1252fe56b9e6SYuval Mintz qm_info->pf_wfq_en = 1; 1253fe56b9e6SYuval Mintz } 1254fe56b9e6SYuval Mintz 1255fe56b9e6SYuval Mintz memset(¶ms, 0, sizeof(params)); 1256fe56b9e6SYuval Mintz params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines; 1257fe56b9e6SYuval Mintz params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; 1258fe56b9e6SYuval Mintz params.pf_rl_en = qm_info->pf_rl_en; 1259fe56b9e6SYuval Mintz params.pf_wfq_en = qm_info->pf_wfq_en; 1260fe56b9e6SYuval Mintz params.vport_rl_en = qm_info->vport_rl_en; 1261fe56b9e6SYuval Mintz params.vport_wfq_en = qm_info->vport_wfq_en; 1262fe56b9e6SYuval Mintz params.port_params = qm_info->qm_port_params; 1263fe56b9e6SYuval Mintz 1264fe56b9e6SYuval Mintz qed_qm_common_rt_init(p_hwfn, ¶ms); 1265fe56b9e6SYuval Mintz 1266fe56b9e6SYuval Mintz qed_cxt_hw_init_common(p_hwfn); 1267fe56b9e6SYuval Mintz 126860afed72STomer Tayar qed_init_cache_line_size(p_hwfn, p_ptt); 126960afed72STomer Tayar 1270fe56b9e6SYuval Mintz rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 12711a635e48SYuval Mintz if (rc) 1272fe56b9e6SYuval Mintz return rc; 1273fe56b9e6SYuval Mintz 1274fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1275fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1276fe56b9e6SYuval Mintz 1277dbb799c3SYuval Mintz if (QED_IS_BB(p_hwfn->cdev)) { 1278dbb799c3SYuval Mintz num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); 1279dbb799c3SYuval 
Mintz for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1280dbb799c3SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, pf_id); 1281dbb799c3SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1282dbb799c3SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1283dbb799c3SYuval Mintz } 1284dbb799c3SYuval Mintz /* pretend to original PF */ 1285dbb799c3SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1286dbb799c3SYuval Mintz } 1287fe56b9e6SYuval Mintz 12889c79ddaaSMintz, Yuval max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 12899c79ddaaSMintz, Yuval for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 12901408cc1fSYuval Mintz concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); 12911408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); 12921408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 129305fafbfbSYuval Mintz qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 129405fafbfbSYuval Mintz qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 129505fafbfbSYuval Mintz qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 12961408cc1fSYuval Mintz } 12971408cc1fSYuval Mintz /* pretend to original PF */ 12981408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 12991408cc1fSYuval Mintz 1300fe56b9e6SYuval Mintz return rc; 1301fe56b9e6SYuval Mintz } 1302fe56b9e6SYuval Mintz 130351ff1725SRam Amrani static int 130451ff1725SRam Amrani qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, 130551ff1725SRam Amrani struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) 130651ff1725SRam Amrani { 1307107392b7SRam Amrani u32 dpi_bit_shift, dpi_count, dpi_page_size; 130851ff1725SRam Amrani u32 min_dpis; 1309107392b7SRam Amrani u32 n_wids; 131051ff1725SRam Amrani 131151ff1725SRam Amrani /* Calculate DPI size */ 1312107392b7SRam Amrani n_wids = max_t(u32, QED_MIN_WIDS, n_cpus); 1313107392b7SRam Amrani dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids); 1314107392b7SRam Amrani dpi_page_size = 
(dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); 131551ff1725SRam Amrani dpi_bit_shift = ilog2(dpi_page_size / 4096); 131651ff1725SRam Amrani dpi_count = pwm_region_size / dpi_page_size; 131751ff1725SRam Amrani 131851ff1725SRam Amrani min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 131951ff1725SRam Amrani min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); 132051ff1725SRam Amrani 132151ff1725SRam Amrani p_hwfn->dpi_size = dpi_page_size; 132251ff1725SRam Amrani p_hwfn->dpi_count = dpi_count; 132351ff1725SRam Amrani 132451ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 132551ff1725SRam Amrani 132651ff1725SRam Amrani if (dpi_count < min_dpis) 132751ff1725SRam Amrani return -EINVAL; 132851ff1725SRam Amrani 132951ff1725SRam Amrani return 0; 133051ff1725SRam Amrani } 133151ff1725SRam Amrani 133251ff1725SRam Amrani enum QED_ROCE_EDPM_MODE { 133351ff1725SRam Amrani QED_ROCE_EDPM_MODE_ENABLE = 0, 133451ff1725SRam Amrani QED_ROCE_EDPM_MODE_FORCE_ON = 1, 133551ff1725SRam Amrani QED_ROCE_EDPM_MODE_DISABLE = 2, 133651ff1725SRam Amrani }; 133751ff1725SRam Amrani 133851ff1725SRam Amrani static int 133951ff1725SRam Amrani qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 134051ff1725SRam Amrani { 134151ff1725SRam Amrani u32 pwm_regsize, norm_regsize; 134251ff1725SRam Amrani u32 non_pwm_conn, min_addr_reg1; 134320b1bd96SRam Amrani u32 db_bar_size, n_cpus = 1; 134451ff1725SRam Amrani u32 roce_edpm_mode; 134551ff1725SRam Amrani u32 pf_dems_shift; 134651ff1725SRam Amrani int rc = 0; 134751ff1725SRam Amrani u8 cond; 134851ff1725SRam Amrani 134915582962SRahul Verma db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 135051ff1725SRam Amrani if (p_hwfn->cdev->num_hwfns > 1) 135151ff1725SRam Amrani db_bar_size /= 2; 135251ff1725SRam Amrani 135351ff1725SRam Amrani /* Calculate doorbell regions */ 135451ff1725SRam Amrani non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 135551ff1725SRam Amrani 
qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 135651ff1725SRam Amrani NULL) + 135751ff1725SRam Amrani qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 135851ff1725SRam Amrani NULL); 1359a82dadbcSRam Amrani norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE); 136051ff1725SRam Amrani min_addr_reg1 = norm_regsize / 4096; 136151ff1725SRam Amrani pwm_regsize = db_bar_size - norm_regsize; 136251ff1725SRam Amrani 136351ff1725SRam Amrani /* Check that the normal and PWM sizes are valid */ 136451ff1725SRam Amrani if (db_bar_size < norm_regsize) { 136551ff1725SRam Amrani DP_ERR(p_hwfn->cdev, 136651ff1725SRam Amrani "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", 136751ff1725SRam Amrani db_bar_size, norm_regsize); 136851ff1725SRam Amrani return -EINVAL; 136951ff1725SRam Amrani } 137051ff1725SRam Amrani 137151ff1725SRam Amrani if (pwm_regsize < QED_MIN_PWM_REGION) { 137251ff1725SRam Amrani DP_ERR(p_hwfn->cdev, 137351ff1725SRam Amrani "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", 137451ff1725SRam Amrani pwm_regsize, 137551ff1725SRam Amrani QED_MIN_PWM_REGION, db_bar_size, norm_regsize); 137651ff1725SRam Amrani return -EINVAL; 137751ff1725SRam Amrani } 137851ff1725SRam Amrani 137951ff1725SRam Amrani /* Calculate number of DPIs */ 138051ff1725SRam Amrani roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 138151ff1725SRam Amrani if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || 138251ff1725SRam Amrani ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { 138351ff1725SRam Amrani /* Either EDPM is mandatory, or we are attempting to allocate a 138451ff1725SRam Amrani * WID per CPU. 
138551ff1725SRam Amrani */ 1386c2dedf87SRam Amrani n_cpus = num_present_cpus(); 138751ff1725SRam Amrani rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 138851ff1725SRam Amrani } 138951ff1725SRam Amrani 139051ff1725SRam Amrani cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || 139151ff1725SRam Amrani (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); 139251ff1725SRam Amrani if (cond || p_hwfn->dcbx_no_edpm) { 139351ff1725SRam Amrani /* Either EDPM is disabled from user configuration, or it is 139451ff1725SRam Amrani * disabled via DCBx, or it is not mandatory and we failed to 139551ff1725SRam Amrani * allocated a WID per CPU. 139651ff1725SRam Amrani */ 139751ff1725SRam Amrani n_cpus = 1; 139851ff1725SRam Amrani rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 139951ff1725SRam Amrani 140051ff1725SRam Amrani if (cond) 140151ff1725SRam Amrani qed_rdma_dpm_bar(p_hwfn, p_ptt); 140251ff1725SRam Amrani } 140351ff1725SRam Amrani 140420b1bd96SRam Amrani p_hwfn->wid_count = (u16) n_cpus; 140520b1bd96SRam Amrani 140651ff1725SRam Amrani DP_INFO(p_hwfn, 140751ff1725SRam Amrani "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 140851ff1725SRam Amrani norm_regsize, 140951ff1725SRam Amrani pwm_regsize, 141051ff1725SRam Amrani p_hwfn->dpi_size, 141151ff1725SRam Amrani p_hwfn->dpi_count, 141251ff1725SRam Amrani ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 141351ff1725SRam Amrani "disabled" : "enabled"); 141451ff1725SRam Amrani 141551ff1725SRam Amrani if (rc) { 141651ff1725SRam Amrani DP_ERR(p_hwfn, 141751ff1725SRam Amrani "Failed to allocate enough DPIs. 
Allocated %d but the current minimum is %d.\n", 141851ff1725SRam Amrani p_hwfn->dpi_count, 141951ff1725SRam Amrani p_hwfn->pf_params.rdma_pf_params.min_dpis); 142051ff1725SRam Amrani return -EINVAL; 142151ff1725SRam Amrani } 142251ff1725SRam Amrani 142351ff1725SRam Amrani p_hwfn->dpi_start_offset = norm_regsize; 142451ff1725SRam Amrani 142551ff1725SRam Amrani /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 142651ff1725SRam Amrani pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); 142751ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 142851ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 142951ff1725SRam Amrani 143051ff1725SRam Amrani return 0; 143151ff1725SRam Amrani } 143251ff1725SRam Amrani 1433fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn, 14341a635e48SYuval Mintz struct qed_ptt *p_ptt, int hw_mode) 1435fe56b9e6SYuval Mintz { 143605fafbfbSYuval Mintz return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, 143705fafbfbSYuval Mintz p_hwfn->port_id, hw_mode); 1438fe56b9e6SYuval Mintz } 1439fe56b9e6SYuval Mintz 1440fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, 1441fe56b9e6SYuval Mintz struct qed_ptt *p_ptt, 144219968430SChopra, Manish struct qed_tunnel_info *p_tunn, 1443fe56b9e6SYuval Mintz int hw_mode, 1444fe56b9e6SYuval Mintz bool b_hw_start, 1445fe56b9e6SYuval Mintz enum qed_int_mode int_mode, 1446fe56b9e6SYuval Mintz bool allow_npar_tx_switch) 1447fe56b9e6SYuval Mintz { 1448fe56b9e6SYuval Mintz u8 rel_pf_id = p_hwfn->rel_pf_id; 1449fe56b9e6SYuval Mintz int rc = 0; 1450fe56b9e6SYuval Mintz 1451fe56b9e6SYuval Mintz if (p_hwfn->mcp_info) { 1452fe56b9e6SYuval Mintz struct qed_mcp_function_info *p_info; 1453fe56b9e6SYuval Mintz 1454fe56b9e6SYuval Mintz p_info = &p_hwfn->mcp_info->func_info; 1455fe56b9e6SYuval Mintz if (p_info->bandwidth_min) 1456fe56b9e6SYuval Mintz p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 1457fe56b9e6SYuval 
Mintz 1458fe56b9e6SYuval Mintz /* Update rate limit once we'll actually have a link */ 14594b01e519SManish Chopra p_hwfn->qm_info.pf_rl = 100000; 1460fe56b9e6SYuval Mintz } 1461fe56b9e6SYuval Mintz 146215582962SRahul Verma qed_cxt_hw_init_pf(p_hwfn, p_ptt); 1463fe56b9e6SYuval Mintz 1464fe56b9e6SYuval Mintz qed_int_igu_init_rt(p_hwfn); 1465fe56b9e6SYuval Mintz 1466fe56b9e6SYuval Mintz /* Set VLAN in NIG if needed */ 14671a635e48SYuval Mintz if (hw_mode & BIT(MODE_MF_SD)) { 1468fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 1469fe56b9e6SYuval Mintz STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 1470fe56b9e6SYuval Mintz STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 1471fe56b9e6SYuval Mintz p_hwfn->hw_info.ovlan); 1472fe56b9e6SYuval Mintz } 1473fe56b9e6SYuval Mintz 1474fe56b9e6SYuval Mintz /* Enable classification by MAC if needed */ 14751a635e48SYuval Mintz if (hw_mode & BIT(MODE_MF_SI)) { 1476fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 1477fe56b9e6SYuval Mintz "Configuring TAGMAC_CLS_TYPE\n"); 1478fe56b9e6SYuval Mintz STORE_RT_REG(p_hwfn, 1479fe56b9e6SYuval Mintz NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 1480fe56b9e6SYuval Mintz } 1481fe56b9e6SYuval Mintz 1482fe56b9e6SYuval Mintz /* Protocl Configuration */ 1483dbb799c3SYuval Mintz STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 1484dbb799c3SYuval Mintz (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); 14851e128c81SArun Easi STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 14861e128c81SArun Easi (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 
1 : 0); 1487fe56b9e6SYuval Mintz STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 1488fe56b9e6SYuval Mintz 1489fe56b9e6SYuval Mintz /* Cleanup chip from previous driver if such remains exist */ 14900b55e27dSYuval Mintz rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); 14911a635e48SYuval Mintz if (rc) 1492fe56b9e6SYuval Mintz return rc; 1493fe56b9e6SYuval Mintz 1494fe56b9e6SYuval Mintz /* PF Init sequence */ 1495fe56b9e6SYuval Mintz rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 1496fe56b9e6SYuval Mintz if (rc) 1497fe56b9e6SYuval Mintz return rc; 1498fe56b9e6SYuval Mintz 1499fe56b9e6SYuval Mintz /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 1500fe56b9e6SYuval Mintz rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 1501fe56b9e6SYuval Mintz if (rc) 1502fe56b9e6SYuval Mintz return rc; 1503fe56b9e6SYuval Mintz 1504fe56b9e6SYuval Mintz /* Pure runtime initializations - directly to the HW */ 1505fe56b9e6SYuval Mintz qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 1506fe56b9e6SYuval Mintz 150751ff1725SRam Amrani rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 150851ff1725SRam Amrani if (rc) 150951ff1725SRam Amrani return rc; 151051ff1725SRam Amrani 1511fe56b9e6SYuval Mintz if (b_hw_start) { 1512fe56b9e6SYuval Mintz /* enable interrupts */ 1513fe56b9e6SYuval Mintz qed_int_igu_enable(p_hwfn, p_ptt, int_mode); 1514fe56b9e6SYuval Mintz 1515fe56b9e6SYuval Mintz /* send function start command */ 15164f64675fSManish Chopra rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, 15174f64675fSManish Chopra p_hwfn->cdev->mf_mode, 1518831bfb0eSYuval Mintz allow_npar_tx_switch); 15191e128c81SArun Easi if (rc) { 1520fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); 15211e128c81SArun Easi return rc; 15221e128c81SArun Easi } 15231e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 15241e128c81SArun Easi qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); 15251e128c81SArun Easi 
qed_wr(p_hwfn, p_ptt, 15261e128c81SArun Easi PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 15271e128c81SArun Easi 0x100); 15281e128c81SArun Easi } 1529fe56b9e6SYuval Mintz } 1530fe56b9e6SYuval Mintz return rc; 1531fe56b9e6SYuval Mintz } 1532fe56b9e6SYuval Mintz 1533fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, 1534fe56b9e6SYuval Mintz struct qed_ptt *p_ptt, 1535fe56b9e6SYuval Mintz u8 enable) 1536fe56b9e6SYuval Mintz { 1537fe56b9e6SYuval Mintz u32 delay_idx = 0, val, set_val = enable ? 1 : 0; 1538fe56b9e6SYuval Mintz 1539fe56b9e6SYuval Mintz /* Change PF in PXP */ 1540fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, 1541fe56b9e6SYuval Mintz PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 1542fe56b9e6SYuval Mintz 1543fe56b9e6SYuval Mintz /* wait until value is set - try for 1 second every 50us */ 1544fe56b9e6SYuval Mintz for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 1545fe56b9e6SYuval Mintz val = qed_rd(p_hwfn, p_ptt, 1546fe56b9e6SYuval Mintz PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 1547fe56b9e6SYuval Mintz if (val == set_val) 1548fe56b9e6SYuval Mintz break; 1549fe56b9e6SYuval Mintz 1550fe56b9e6SYuval Mintz usleep_range(50, 60); 1551fe56b9e6SYuval Mintz } 1552fe56b9e6SYuval Mintz 1553fe56b9e6SYuval Mintz if (val != set_val) { 1554fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, 1555fe56b9e6SYuval Mintz "PFID_ENABLE_MASTER wasn't changed after a second\n"); 1556fe56b9e6SYuval Mintz return -EAGAIN; 1557fe56b9e6SYuval Mintz } 1558fe56b9e6SYuval Mintz 1559fe56b9e6SYuval Mintz return 0; 1560fe56b9e6SYuval Mintz } 1561fe56b9e6SYuval Mintz 1562fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, 1563fe56b9e6SYuval Mintz struct qed_ptt *p_main_ptt) 1564fe56b9e6SYuval Mintz { 1565fe56b9e6SYuval Mintz /* Read shadow of current MFW mailbox */ 1566fe56b9e6SYuval Mintz qed_mcp_read_mb(p_hwfn, p_main_ptt); 1567fe56b9e6SYuval Mintz memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 15681a635e48SYuval Mintz p_hwfn->mcp_info->mfw_mb_cur, 
p_hwfn->mcp_info->mfw_mb_length); 1569fe56b9e6SYuval Mintz } 1570fe56b9e6SYuval Mintz 15715d24bcf1STomer Tayar static void 15725d24bcf1STomer Tayar qed_fill_load_req_params(struct qed_load_req_params *p_load_req, 15735d24bcf1STomer Tayar struct qed_drv_load_params *p_drv_load) 15745d24bcf1STomer Tayar { 15755d24bcf1STomer Tayar memset(p_load_req, 0, sizeof(*p_load_req)); 15765d24bcf1STomer Tayar 15775d24bcf1STomer Tayar p_load_req->drv_role = p_drv_load->is_crash_kernel ? 15785d24bcf1STomer Tayar QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; 15795d24bcf1STomer Tayar p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 15805d24bcf1STomer Tayar p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 15815d24bcf1STomer Tayar p_load_req->override_force_load = p_drv_load->override_force_load; 15825d24bcf1STomer Tayar } 15835d24bcf1STomer Tayar 1584eaf3c0c6SChopra, Manish static int qed_vf_start(struct qed_hwfn *p_hwfn, 1585eaf3c0c6SChopra, Manish struct qed_hw_init_params *p_params) 1586eaf3c0c6SChopra, Manish { 1587eaf3c0c6SChopra, Manish if (p_params->p_tunn) { 1588eaf3c0c6SChopra, Manish qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 1589eaf3c0c6SChopra, Manish qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 1590eaf3c0c6SChopra, Manish } 1591eaf3c0c6SChopra, Manish 1592eaf3c0c6SChopra, Manish p_hwfn->b_int_enabled = 1; 1593eaf3c0c6SChopra, Manish 1594eaf3c0c6SChopra, Manish return 0; 1595eaf3c0c6SChopra, Manish } 1596eaf3c0c6SChopra, Manish 1597c0c2d0b4SMintz, Yuval int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 1598fe56b9e6SYuval Mintz { 15995d24bcf1STomer Tayar struct qed_load_req_params load_req_params; 16000fefbfbaSSudarsana Kalluru u32 load_code, param, drv_mb_param; 16010fefbfbaSSudarsana Kalluru bool b_default_mtu = true; 16020fefbfbaSSudarsana Kalluru struct qed_hwfn *p_hwfn; 16030fefbfbaSSudarsana Kalluru int rc = 0, mfw_rc, i; 1604fe56b9e6SYuval Mintz 1605c0c2d0b4SMintz, Yuval if ((p_params->int_mode == 
QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 1606bb13ace7SSudarsana Reddy Kalluru DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 1607bb13ace7SSudarsana Reddy Kalluru return -EINVAL; 1608bb13ace7SSudarsana Reddy Kalluru } 1609bb13ace7SSudarsana Reddy Kalluru 16101408cc1fSYuval Mintz if (IS_PF(cdev)) { 1611c0c2d0b4SMintz, Yuval rc = qed_init_fw_data(cdev, p_params->bin_fw_data); 16121a635e48SYuval Mintz if (rc) 1613fe56b9e6SYuval Mintz return rc; 16141408cc1fSYuval Mintz } 1615fe56b9e6SYuval Mintz 1616fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 1617fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1618fe56b9e6SYuval Mintz 16190fefbfbaSSudarsana Kalluru /* If management didn't provide a default, set one of our own */ 16200fefbfbaSSudarsana Kalluru if (!p_hwfn->hw_info.mtu) { 16210fefbfbaSSudarsana Kalluru p_hwfn->hw_info.mtu = 1500; 16220fefbfbaSSudarsana Kalluru b_default_mtu = false; 16230fefbfbaSSudarsana Kalluru } 16240fefbfbaSSudarsana Kalluru 16251408cc1fSYuval Mintz if (IS_VF(cdev)) { 1626eaf3c0c6SChopra, Manish qed_vf_start(p_hwfn, p_params); 16271408cc1fSYuval Mintz continue; 16281408cc1fSYuval Mintz } 16291408cc1fSYuval Mintz 1630fe56b9e6SYuval Mintz /* Enable DMAE in PXP */ 1631fe56b9e6SYuval Mintz rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 1632fe56b9e6SYuval Mintz 16339c79ddaaSMintz, Yuval rc = qed_calc_hw_mode(p_hwfn); 16349c79ddaaSMintz, Yuval if (rc) 16359c79ddaaSMintz, Yuval return rc; 1636fe56b9e6SYuval Mintz 16375d24bcf1STomer Tayar qed_fill_load_req_params(&load_req_params, 16385d24bcf1STomer Tayar p_params->p_drv_load_params); 16395d24bcf1STomer Tayar rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 16405d24bcf1STomer Tayar &load_req_params); 1641fe56b9e6SYuval Mintz if (rc) { 16425d24bcf1STomer Tayar DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n"); 1643fe56b9e6SYuval Mintz return rc; 1644fe56b9e6SYuval Mintz } 1645fe56b9e6SYuval Mintz 16465d24bcf1STomer Tayar load_code = 
load_req_params.load_code; 1647fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_SP, 16485d24bcf1STomer Tayar "Load request was sent. Load code: 0x%x\n", 16495d24bcf1STomer Tayar load_code); 16505d24bcf1STomer Tayar 16515d24bcf1STomer Tayar qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 1652fe56b9e6SYuval Mintz 1653fe56b9e6SYuval Mintz p_hwfn->first_on_engine = (load_code == 1654fe56b9e6SYuval Mintz FW_MSG_CODE_DRV_LOAD_ENGINE); 1655fe56b9e6SYuval Mintz 1656fe56b9e6SYuval Mintz switch (load_code) { 1657fe56b9e6SYuval Mintz case FW_MSG_CODE_DRV_LOAD_ENGINE: 1658fe56b9e6SYuval Mintz rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 1659fe56b9e6SYuval Mintz p_hwfn->hw_info.hw_mode); 1660fe56b9e6SYuval Mintz if (rc) 1661fe56b9e6SYuval Mintz break; 1662fe56b9e6SYuval Mintz /* Fall into */ 1663fe56b9e6SYuval Mintz case FW_MSG_CODE_DRV_LOAD_PORT: 1664fe56b9e6SYuval Mintz rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 1665fe56b9e6SYuval Mintz p_hwfn->hw_info.hw_mode); 1666fe56b9e6SYuval Mintz if (rc) 1667fe56b9e6SYuval Mintz break; 1668fe56b9e6SYuval Mintz 1669fe56b9e6SYuval Mintz /* Fall into */ 1670fe56b9e6SYuval Mintz case FW_MSG_CODE_DRV_LOAD_FUNCTION: 1671fe56b9e6SYuval Mintz rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 1672c0c2d0b4SMintz, Yuval p_params->p_tunn, 1673c0c2d0b4SMintz, Yuval p_hwfn->hw_info.hw_mode, 1674c0c2d0b4SMintz, Yuval p_params->b_hw_start, 1675c0c2d0b4SMintz, Yuval p_params->int_mode, 1676c0c2d0b4SMintz, Yuval p_params->allow_npar_tx_switch); 1677fe56b9e6SYuval Mintz break; 1678fe56b9e6SYuval Mintz default: 1679c0c2d0b4SMintz, Yuval DP_NOTICE(p_hwfn, 1680c0c2d0b4SMintz, Yuval "Unexpected load code [0x%08x]", load_code); 1681fe56b9e6SYuval Mintz rc = -EINVAL; 1682fe56b9e6SYuval Mintz break; 1683fe56b9e6SYuval Mintz } 1684fe56b9e6SYuval Mintz 1685fe56b9e6SYuval Mintz if (rc) 1686fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, 1687fe56b9e6SYuval Mintz "init phase failed for loadcode 0x%x (rc %d)\n", 1688fe56b9e6SYuval Mintz load_code, rc); 
1689fe56b9e6SYuval Mintz 1690fe56b9e6SYuval Mintz /* ACK mfw regardless of success or failure of initialization */ 1691fe56b9e6SYuval Mintz mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 1692fe56b9e6SYuval Mintz DRV_MSG_CODE_LOAD_DONE, 1693fe56b9e6SYuval Mintz 0, &load_code, ¶m); 1694fe56b9e6SYuval Mintz if (rc) 1695fe56b9e6SYuval Mintz return rc; 1696fe56b9e6SYuval Mintz if (mfw_rc) { 1697fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); 1698fe56b9e6SYuval Mintz return mfw_rc; 1699fe56b9e6SYuval Mintz } 1700fe56b9e6SYuval Mintz 170139651abdSSudarsana Reddy Kalluru /* send DCBX attention request command */ 170239651abdSSudarsana Reddy Kalluru DP_VERBOSE(p_hwfn, 170339651abdSSudarsana Reddy Kalluru QED_MSG_DCB, 170439651abdSSudarsana Reddy Kalluru "sending phony dcbx set command to trigger DCBx attention handling\n"); 170539651abdSSudarsana Reddy Kalluru mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 170639651abdSSudarsana Reddy Kalluru DRV_MSG_CODE_SET_DCBX, 170739651abdSSudarsana Reddy Kalluru 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, 170839651abdSSudarsana Reddy Kalluru &load_code, ¶m); 170939651abdSSudarsana Reddy Kalluru if (mfw_rc) { 171039651abdSSudarsana Reddy Kalluru DP_NOTICE(p_hwfn, 171139651abdSSudarsana Reddy Kalluru "Failed to send DCBX attention request\n"); 171239651abdSSudarsana Reddy Kalluru return mfw_rc; 171339651abdSSudarsana Reddy Kalluru } 171439651abdSSudarsana Reddy Kalluru 1715fe56b9e6SYuval Mintz p_hwfn->hw_init_done = true; 1716fe56b9e6SYuval Mintz } 1717fe56b9e6SYuval Mintz 17180fefbfbaSSudarsana Kalluru if (IS_PF(cdev)) { 17190fefbfbaSSudarsana Kalluru p_hwfn = QED_LEADING_HWFN(cdev); 17205d24bcf1STomer Tayar drv_mb_param = STORM_FW_VERSION; 17210fefbfbaSSudarsana Kalluru rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 17220fefbfbaSSudarsana Kalluru DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 17230fefbfbaSSudarsana Kalluru drv_mb_param, &load_code, ¶m); 17240fefbfbaSSudarsana Kalluru if (rc) 17250fefbfbaSSudarsana 
Kalluru DP_INFO(p_hwfn, "Failed to update firmware version\n"); 17260fefbfbaSSudarsana Kalluru 17270fefbfbaSSudarsana Kalluru if (!b_default_mtu) { 17280fefbfbaSSudarsana Kalluru rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 17290fefbfbaSSudarsana Kalluru p_hwfn->hw_info.mtu); 17300fefbfbaSSudarsana Kalluru if (rc) 17310fefbfbaSSudarsana Kalluru DP_INFO(p_hwfn, 17320fefbfbaSSudarsana Kalluru "Failed to update default mtu\n"); 17330fefbfbaSSudarsana Kalluru } 17340fefbfbaSSudarsana Kalluru 17350fefbfbaSSudarsana Kalluru rc = qed_mcp_ov_update_driver_state(p_hwfn, 17360fefbfbaSSudarsana Kalluru p_hwfn->p_main_ptt, 17370fefbfbaSSudarsana Kalluru QED_OV_DRIVER_STATE_DISABLED); 17380fefbfbaSSudarsana Kalluru if (rc) 17390fefbfbaSSudarsana Kalluru DP_INFO(p_hwfn, "Failed to update driver state\n"); 17400fefbfbaSSudarsana Kalluru 17410fefbfbaSSudarsana Kalluru rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 17420fefbfbaSSudarsana Kalluru QED_OV_ESWITCH_VEB); 17430fefbfbaSSudarsana Kalluru if (rc) 17440fefbfbaSSudarsana Kalluru DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 17450fefbfbaSSudarsana Kalluru } 17460fefbfbaSSudarsana Kalluru 1747fe56b9e6SYuval Mintz return 0; 1748fe56b9e6SYuval Mintz } 1749fe56b9e6SYuval Mintz 1750fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10) 17511a635e48SYuval Mintz static void qed_hw_timers_stop(struct qed_dev *cdev, 17521a635e48SYuval Mintz struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 17538c925c44SYuval Mintz { 17548c925c44SYuval Mintz int i; 17558c925c44SYuval Mintz 17568c925c44SYuval Mintz /* close timers */ 17578c925c44SYuval Mintz qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 17588c925c44SYuval Mintz qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 17598c925c44SYuval Mintz 17608c925c44SYuval Mintz for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { 17618c925c44SYuval Mintz if ((!qed_rd(p_hwfn, p_ptt, 17628c925c44SYuval Mintz TM_REG_PF_SCAN_ACTIVE_CONN)) && 17631a635e48SYuval Mintz 
(!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) 17648c925c44SYuval Mintz break; 17658c925c44SYuval Mintz 17668c925c44SYuval Mintz /* Dependent on number of connection/tasks, possibly 17678c925c44SYuval Mintz * 1ms sleep is required between polls 17688c925c44SYuval Mintz */ 17698c925c44SYuval Mintz usleep_range(1000, 2000); 17708c925c44SYuval Mintz } 17718c925c44SYuval Mintz 17728c925c44SYuval Mintz if (i < QED_HW_STOP_RETRY_LIMIT) 17738c925c44SYuval Mintz return; 17748c925c44SYuval Mintz 17758c925c44SYuval Mintz DP_NOTICE(p_hwfn, 17768c925c44SYuval Mintz "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 17778c925c44SYuval Mintz (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 17788c925c44SYuval Mintz (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 17798c925c44SYuval Mintz } 17808c925c44SYuval Mintz 17818c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev) 17828c925c44SYuval Mintz { 17838c925c44SYuval Mintz int j; 17848c925c44SYuval Mintz 17858c925c44SYuval Mintz for_each_hwfn(cdev, j) { 17868c925c44SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 17878c925c44SYuval Mintz struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; 17888c925c44SYuval Mintz 17898c925c44SYuval Mintz qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 17908c925c44SYuval Mintz } 17918c925c44SYuval Mintz } 17928c925c44SYuval Mintz 1793fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev) 1794fe56b9e6SYuval Mintz { 17951226337aSTomer Tayar struct qed_hwfn *p_hwfn; 17961226337aSTomer Tayar struct qed_ptt *p_ptt; 17971226337aSTomer Tayar int rc, rc2 = 0; 17988c925c44SYuval Mintz int j; 1799fe56b9e6SYuval Mintz 1800fe56b9e6SYuval Mintz for_each_hwfn(cdev, j) { 18011226337aSTomer Tayar p_hwfn = &cdev->hwfns[j]; 18021226337aSTomer Tayar p_ptt = p_hwfn->p_main_ptt; 1803fe56b9e6SYuval Mintz 1804fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); 1805fe56b9e6SYuval Mintz 18061408cc1fSYuval Mintz if (IS_VF(cdev)) { 18070b55e27dSYuval 
Mintz qed_vf_pf_int_cleanup(p_hwfn); 18081226337aSTomer Tayar rc = qed_vf_pf_reset(p_hwfn); 18091226337aSTomer Tayar if (rc) { 18101226337aSTomer Tayar DP_NOTICE(p_hwfn, 18111226337aSTomer Tayar "qed_vf_pf_reset failed. rc = %d.\n", 18121226337aSTomer Tayar rc); 18131226337aSTomer Tayar rc2 = -EINVAL; 18141226337aSTomer Tayar } 18151408cc1fSYuval Mintz continue; 18161408cc1fSYuval Mintz } 18171408cc1fSYuval Mintz 1818fe56b9e6SYuval Mintz /* mark the hw as uninitialized... */ 1819fe56b9e6SYuval Mintz p_hwfn->hw_init_done = false; 1820fe56b9e6SYuval Mintz 18211226337aSTomer Tayar /* Send unload command to MCP */ 18221226337aSTomer Tayar rc = qed_mcp_unload_req(p_hwfn, p_ptt); 18231226337aSTomer Tayar if (rc) { 18248c925c44SYuval Mintz DP_NOTICE(p_hwfn, 18251226337aSTomer Tayar "Failed sending a UNLOAD_REQ command. rc = %d.\n", 18261226337aSTomer Tayar rc); 18271226337aSTomer Tayar rc2 = -EINVAL; 18281226337aSTomer Tayar } 18291226337aSTomer Tayar 18301226337aSTomer Tayar qed_slowpath_irq_sync(p_hwfn); 18311226337aSTomer Tayar 18321226337aSTomer Tayar /* After this point no MFW attentions are expected, e.g. prevent 18331226337aSTomer Tayar * race between pf stop and dcbx pf update. 18341226337aSTomer Tayar */ 18351226337aSTomer Tayar rc = qed_sp_pf_stop(p_hwfn); 18361226337aSTomer Tayar if (rc) { 18371226337aSTomer Tayar DP_NOTICE(p_hwfn, 18381226337aSTomer Tayar "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", 18391226337aSTomer Tayar rc); 18401226337aSTomer Tayar rc2 = -EINVAL; 18411226337aSTomer Tayar } 1842fe56b9e6SYuval Mintz 1843fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, 1844fe56b9e6SYuval Mintz NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 1845fe56b9e6SYuval Mintz 1846fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1847fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 1848fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 1849fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1850fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 1851fe56b9e6SYuval Mintz 18528c925c44SYuval Mintz qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 1853fe56b9e6SYuval Mintz 1854fe56b9e6SYuval Mintz /* Disable Attention Generation */ 1855fe56b9e6SYuval Mintz qed_int_igu_disable_int(p_hwfn, p_ptt); 1856fe56b9e6SYuval Mintz 1857fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 1858fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 1859fe56b9e6SYuval Mintz 1860fe56b9e6SYuval Mintz qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 1861fe56b9e6SYuval Mintz 1862fe56b9e6SYuval Mintz /* Need to wait 1ms to guarantee SBs are cleared */ 1863fe56b9e6SYuval Mintz usleep_range(1000, 2000); 18641226337aSTomer Tayar 18651226337aSTomer Tayar /* Disable PF in HW blocks */ 18661226337aSTomer Tayar qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 18671226337aSTomer Tayar qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 18681226337aSTomer Tayar 18691226337aSTomer Tayar qed_mcp_unload_done(p_hwfn, p_ptt); 18701226337aSTomer Tayar if (rc) { 18711226337aSTomer Tayar DP_NOTICE(p_hwfn, 18721226337aSTomer Tayar "Failed sending a UNLOAD_DONE command. 
rc = %d.\n", 18731226337aSTomer Tayar rc); 18741226337aSTomer Tayar rc2 = -EINVAL; 18751226337aSTomer Tayar } 1876fe56b9e6SYuval Mintz } 1877fe56b9e6SYuval Mintz 18781408cc1fSYuval Mintz if (IS_PF(cdev)) { 18791226337aSTomer Tayar p_hwfn = QED_LEADING_HWFN(cdev); 18801226337aSTomer Tayar p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; 18811226337aSTomer Tayar 1882fe56b9e6SYuval Mintz /* Disable DMAE in PXP - in CMT, this should only be done for 1883fe56b9e6SYuval Mintz * first hw-function, and only after all transactions have 1884fe56b9e6SYuval Mintz * stopped for all active hw-functions. 1885fe56b9e6SYuval Mintz */ 18861226337aSTomer Tayar rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); 18871226337aSTomer Tayar if (rc) { 18881226337aSTomer Tayar DP_NOTICE(p_hwfn, 18891226337aSTomer Tayar "qed_change_pci_hwfn failed. rc = %d.\n", rc); 18901226337aSTomer Tayar rc2 = -EINVAL; 18911226337aSTomer Tayar } 18921408cc1fSYuval Mintz } 1893fe56b9e6SYuval Mintz 18941226337aSTomer Tayar return rc2; 1895fe56b9e6SYuval Mintz } 1896fe56b9e6SYuval Mintz 189715582962SRahul Verma int qed_hw_stop_fastpath(struct qed_dev *cdev) 1898cee4d264SManish Chopra { 18998c925c44SYuval Mintz int j; 1900cee4d264SManish Chopra 1901cee4d264SManish Chopra for_each_hwfn(cdev, j) { 1902cee4d264SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 190315582962SRahul Verma struct qed_ptt *p_ptt; 1904cee4d264SManish Chopra 1905dacd88d6SYuval Mintz if (IS_VF(cdev)) { 1906dacd88d6SYuval Mintz qed_vf_pf_int_cleanup(p_hwfn); 1907dacd88d6SYuval Mintz continue; 1908dacd88d6SYuval Mintz } 190915582962SRahul Verma p_ptt = qed_ptt_acquire(p_hwfn); 191015582962SRahul Verma if (!p_ptt) 191115582962SRahul Verma return -EAGAIN; 1912dacd88d6SYuval Mintz 1913cee4d264SManish Chopra DP_VERBOSE(p_hwfn, 19141a635e48SYuval Mintz NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); 1915cee4d264SManish Chopra 1916cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, 1917cee4d264SManish Chopra NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

	/* Need to wait 1ms to guarantee SBs are cleared */
	usleep_range(1000, 2000);
	qed_ptt_release(p_hwfn, p_ptt);
	}

	return 0;
}

/* Re-open the Rx path for a PF after the fastpath was stopped.
 * No-op (returns 0) for VFs; returns -EAGAIN if no PTT entry is available.
 */
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);

	/* NULL the pointer after kfree() to guard against double-free /
	 * use-after-free on a later teardown path.
	 */
	p_hwfn->hw_info.p_igu_info = NULL;
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access - the PGL address registers differ between
	 * the AH (K2) and BB chip variants.
	 */
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
	} else {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
	}

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

/* Derive the function identifiers (opaque/concrete FIDs, absolute and
 * relative PF ids, port id) from the PXP "ME" registers.
 */
static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
						  PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}

/* Distribute the status blocks and L2 queues the PF owns among the
 * features (RoCE CNQs, PF/VF L2 queues, iSCSI CQs) according to the
 * hwfn's personality.
 */
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	struct qed_sb_cnt_info sb_cnt_info;
	u32 non_l2_sbs = 0;

	if (IS_ENABLED(CONFIG_QED_RDMA) &&
	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
		 * the status blocks equally between L2 / RoCE but with
		 * consideration as to how many l2 queues / cnqs we have.
		 */
		feat_num[QED_RDMA_CNQ] =
			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));

		non_l2_sbs = feat_num[QED_RDMA_CNQ];
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
	    p_hwfn->hw_info.personality == QED_PCI_ETH) {
		/* Start by allocating VF queues, then PF's */
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		feat_num[QED_VF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_L2_QUEUE),
						sb_cnt_info.sb_iov_cnt);
		/* PF L2 queues get whatever SBs are left after the RDMA
		 * CNQs, bounded by the L2 queues not given to VFs.
		 */
		feat_num[QED_PF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_SB) -
						non_l2_sbs,
						RESC_NUM(p_hwfn,
							 QED_L2_QUEUE) -
						FEAT_NUM(p_hwfn,
							 QED_VF_L2_QUE));
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
		feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
					       RESC_NUM(p_hwfn,
							QED_CMDQS_CQS));
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
		   (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
		   RESC_NUM(p_hwfn, QED_SB));
}

/* Human-readable name for a resource id, used in log messages only */
const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
	switch (res_id) {
	case QED_L2_QUEUE:
		return "L2_QUEUE";
	case QED_VPORT:
		return "VPORT";
	case QED_RSS_ENG:
		return "RSS_ENG";
	case QED_PQ:
		return "PQ";
	case QED_RL:
		return "RL";
	case QED_MAC:
		return "MAC";
	case QED_VLAN:
		return "VLAN";
	case QED_RDMA_CNQ_RAM:
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
	case QED_LL2_QUEUE:
		return "LL2_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
		return "RDMA_STATS_QUEUE";
	case QED_BDQ:
		return "BDQ";
	case QED_SB:
		return "SB";
	default:
		return "UNKNOWN_RESOURCE";
	}
}

/* Ask the MFW to set the max value for a single resource.
 * Returns non-zero only on a mailbox failure; a "not OK" response code is
 * reported via *p_mcp_resp (and logged) but is not treated as an error.
 */
static int
__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_resources res_id,
			    u32 resc_max_val, u32 *p_mcp_resp)
{
	int rc;

	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
				      resc_max_val, p_mcp_resp);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for a max value setting of resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		DP_INFO(p_hwfn,
			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
			res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);

	return 0;
}

/* Report to the MFW the max values of the "soft" resources - those whose
 * limits are driver/HSI constants rather than HW-discovered.
 */
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u32 resc_max_val, mcp_resp;
	u8 res_id;
	int rc;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		switch (res_id) {
		case QED_LL2_QUEUE:
			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
			break;
		case QED_RDMA_CNQ_RAM:
			/* No need for a case for QED_CMDQS_CQS since
			 * CNQ/CMDQS are the same resource.
			 */
			resc_max_val = NUM_OF_CMDQS_CQS;
			break;
		case QED_RDMA_STATS_QUEUE:
			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
			    : RDMA_NUM_STATISTIC_COUNTERS_BB;
			break;
		case QED_BDQ:
			resc_max_val = BDQ_NUM_RESOURCES;
			break;
		default:
			/* All other resources are not "soft" - skip them */
			continue;
		}

		rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
						 resc_max_val, &mcp_resp);
		if (rc)
			return rc;

		/* There's no point to continue to the next resource if the
		 * command is not supported by the MFW.
		 * We do continue if the command is supported but the resource
		 * is unknown to the MFW. Such a resource will be later
		 * configured with the default allocation values.
		 */
		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
			return -EINVAL;
	}

	return 0;
}

/* Compute the driver-default amount and starting offset of a resource,
 * used when the MFW cannot provide the allocation itself. Most resources
 * are simply the chip total divided equally among the enabled functions.
 */
static
int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
			 enum qed_resources res_id,
			 u32 *p_resc_num, u32 *p_resc_start)
{
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	struct qed_sb_cnt_info sb_cnt_info;

	switch (res_id) {
	case QED_L2_QUEUE:
		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
		break;
	case QED_VPORT:
		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
			       MAX_NUM_VPORTS_BB) / num_funcs;
		break;
	case QED_RSS_ENG:
		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
		break;
	case QED_PQ:
		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
			       MAX_QM_TX_QUEUES_BB) / num_funcs;
		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
		break;
	case QED_RL:
		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
		break;
	case QED_ILT:
		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
		break;
	case QED_LL2_QUEUE:
		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
		break;
	case QED_BDQ:
		/* BDQs exist only for storage personalities */
		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
		    p_hwfn->hw_info.personality != QED_PCI_FCOE)
			*p_resc_num = 0;
		else
			*p_resc_num = 1;
		break;
	case QED_SB:
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		*p_resc_num = sb_cnt_info.sb_cnt;
		break;
	default:
		return -EINVAL;
	}

	switch (res_id) {
	case QED_BDQ:
		/* BDQ start offsets are per-port; FCoE uses the upper pair
		 * of offsets in 2-port mode.
		 */
		if (!*p_resc_num)
			*p_resc_start = 0;
		else if (p_hwfn->cdev->num_ports_in_engines == 4)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			*p_resc_start = p_hwfn->port_id + 2;
		break;
	default:
		/* Consecutive per-function slices */
		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
		break;
	}

	return 0;
}

/* Obtain the allocation of a single resource - preferably from the MFW,
 * falling back to the driver defaults when the MFW cannot provide it.
 */
static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
				  enum qed_resources res_id)
{
	u32 dflt_resc_num = 0, dflt_resc_start = 0;
	u32 mcp_resp, *p_resc_num, *p_resc_start;
	int rc;

	p_resc_num = &RESC_NUM(p_hwfn, res_id);
	p_resc_start = &RESC_START(p_hwfn, res_id);

	rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
				  &dflt_resc_start);
	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to get default amount for resource %d [%s]\n",
		       res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
				   &mcp_resp, p_resc_num, p_resc_start);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for an allocation request for resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	/* Default driver values are applied in the following cases:
	 * - The resource allocation MB command is not supported by the MFW
	 * - There is an internal error in the MFW while processing the request
	 * - The resource ID is unknown to the MFW
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		DP_INFO(p_hwfn,
			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
			res_id,
			qed_hw_get_resc_name(res_id),
			mcp_resp, dflt_resc_num, dflt_resc_start);
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
		goto out;
	}

	/* Special handling for status blocks; Would be revised in future */
	if (res_id == QED_SB) {
		*p_resc_num -= 1;
		*p_resc_start -= p_hwfn->enabled_func_idx;
	}
out:
	/* PQs have to divide by 8 [that's the HW granularity].
	 * Reduce number so it would fit.
	 */
	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
		DP_INFO(p_hwfn,
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
			*p_resc_num,
			(*p_resc_num) & ~0x7,
			*p_resc_start, (*p_resc_start) & ~0x7);
		*p_resc_num &= ~0x7;
		*p_resc_start &= ~0x7;
	}

	return 0;
}

/* Obtain the allocation of every resource; stops on the first failure */
static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
	int rc;
	u8 res_id;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		rc = __qed_hw_set_resc_info(p_hwfn, res_id);
		if (rc)
			return rc;
	}

	return 0;
}

/* Learn the HW resource allocation of this hwfn - max-value negotiation
 * with the MFW (under a resource lock), per-resource queries, and a final
 * sanity check on the ILT range.
 */
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params resc_unlock_params;
	struct qed_resc_lock_params resc_lock_params;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u8 res_id;
	int rc;

	/* Setting the max values of the soft resources and the following
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (!rc && !resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return -EBUSY;
	} else {
		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc && rc != -EINVAL) {
			DP_NOTICE(p_hwfn,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == -EINVAL) {
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
						 &resc_unlock_params);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
		}
	}

	rc = qed_hw_set_resc_info(p_hwfn);
	if (rc)
		goto unlock_and_exit;

	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
		if (rc)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
	}

	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	qed_hw_set_feat(p_hwfn);

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
			   qed_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return 0;

unlock_and_exit:
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
	return rc;
}

/* Read the port mode, default link configuration, multi-function mode and
 * device capabilities from the nvm_cfg area of the MCP scratchpad.
 */
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;

	link_temp = link->speed.advertised_speeds;
	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
	}

	p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
		link->speed.autoneg;

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
		__set_bit(QED_DEV_CAP_FCOE,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		__set_bit(QED_DEV_CAP_ISCSI,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
	struct qed_dev *cdev = p_hwfn->cdev;

	num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_IS_BB(cdev)) {
			if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
				num_funcs = 0;
				eng_mask = 0xaaaa;
			} else {
				num_funcs = 1;
				eng_mask = 0x5554;
			}
		} else {
			num_funcs = 1;
			eng_mask = 0xfffe;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
2634dbb799c3SYuval Mintz while (tmp) { 2635dbb799c3SYuval Mintz if (tmp & 0x1) 2636dbb799c3SYuval Mintz enabled_func_idx--; 2637dbb799c3SYuval Mintz tmp >>= 0x1; 2638dbb799c3SYuval Mintz } 26391408cc1fSYuval Mintz } 26401408cc1fSYuval Mintz 26411408cc1fSYuval Mintz p_hwfn->num_funcs_on_engine = num_funcs; 2642dbb799c3SYuval Mintz p_hwfn->enabled_func_idx = enabled_func_idx; 26431408cc1fSYuval Mintz 26441408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 26451408cc1fSYuval Mintz NETIF_MSG_PROBE, 2646525ef5c0SYuval Mintz "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 26471408cc1fSYuval Mintz p_hwfn->rel_pf_id, 26481408cc1fSYuval Mintz p_hwfn->abs_pf_id, 2649525ef5c0SYuval Mintz p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 26501408cc1fSYuval Mintz } 26511408cc1fSYuval Mintz 26529c79ddaaSMintz, Yuval static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, 26539c79ddaaSMintz, Yuval struct qed_ptt *p_ptt) 2654fe56b9e6SYuval Mintz { 2655fe56b9e6SYuval Mintz u32 port_mode; 2656fe56b9e6SYuval Mintz 26579c79ddaaSMintz, Yuval port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); 2658fe56b9e6SYuval Mintz 2659fe56b9e6SYuval Mintz if (port_mode < 3) { 2660fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines = 1; 2661fe56b9e6SYuval Mintz } else if (port_mode <= 5) { 2662fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines = 2; 2663fe56b9e6SYuval Mintz } else { 2664fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", 2665fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines); 2666fe56b9e6SYuval Mintz 2667fe56b9e6SYuval Mintz /* Default num_ports_in_engines to something */ 2668fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines = 1; 2669fe56b9e6SYuval Mintz } 26709c79ddaaSMintz, Yuval } 26719c79ddaaSMintz, Yuval 26729c79ddaaSMintz, Yuval static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, 26739c79ddaaSMintz, Yuval struct qed_ptt *p_ptt) 26749c79ddaaSMintz, Yuval { 
26759c79ddaaSMintz, Yuval u32 port; 26769c79ddaaSMintz, Yuval int i; 26779c79ddaaSMintz, Yuval 26789c79ddaaSMintz, Yuval p_hwfn->cdev->num_ports_in_engines = 0; 26799c79ddaaSMintz, Yuval 26809c79ddaaSMintz, Yuval for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 26819c79ddaaSMintz, Yuval port = qed_rd(p_hwfn, p_ptt, 26829c79ddaaSMintz, Yuval CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); 26839c79ddaaSMintz, Yuval if (port & 1) 26849c79ddaaSMintz, Yuval p_hwfn->cdev->num_ports_in_engines++; 26859c79ddaaSMintz, Yuval } 26869c79ddaaSMintz, Yuval 26879c79ddaaSMintz, Yuval if (!p_hwfn->cdev->num_ports_in_engines) { 26889c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); 26899c79ddaaSMintz, Yuval 26909c79ddaaSMintz, Yuval /* Default num_ports_in_engine to something */ 26919c79ddaaSMintz, Yuval p_hwfn->cdev->num_ports_in_engines = 1; 26929c79ddaaSMintz, Yuval } 26939c79ddaaSMintz, Yuval } 26949c79ddaaSMintz, Yuval 26959c79ddaaSMintz, Yuval static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 26969c79ddaaSMintz, Yuval { 26979c79ddaaSMintz, Yuval if (QED_IS_BB(p_hwfn->cdev)) 26989c79ddaaSMintz, Yuval qed_hw_info_port_num_bb(p_hwfn, p_ptt); 26999c79ddaaSMintz, Yuval else 27009c79ddaaSMintz, Yuval qed_hw_info_port_num_ah(p_hwfn, p_ptt); 27019c79ddaaSMintz, Yuval } 27029c79ddaaSMintz, Yuval 27039c79ddaaSMintz, Yuval static int 27049c79ddaaSMintz, Yuval qed_get_hw_info(struct qed_hwfn *p_hwfn, 27059c79ddaaSMintz, Yuval struct qed_ptt *p_ptt, 27069c79ddaaSMintz, Yuval enum qed_pci_personality personality) 27079c79ddaaSMintz, Yuval { 27089c79ddaaSMintz, Yuval int rc; 27099c79ddaaSMintz, Yuval 27109c79ddaaSMintz, Yuval /* Since all information is common, only first hwfns should do this */ 27119c79ddaaSMintz, Yuval if (IS_LEAD_HWFN(p_hwfn)) { 27129c79ddaaSMintz, Yuval rc = qed_iov_hw_info(p_hwfn); 27139c79ddaaSMintz, Yuval if (rc) 27149c79ddaaSMintz, Yuval return rc; 27159c79ddaaSMintz, Yuval } 27169c79ddaaSMintz, Yuval 27179c79ddaaSMintz, Yuval 
qed_hw_info_port_num(p_hwfn, p_ptt); 2718fe56b9e6SYuval Mintz 2719fe56b9e6SYuval Mintz qed_hw_get_nvm_info(p_hwfn, p_ptt); 2720fe56b9e6SYuval Mintz 2721fe56b9e6SYuval Mintz rc = qed_int_igu_read_cam(p_hwfn, p_ptt); 2722fe56b9e6SYuval Mintz if (rc) 2723fe56b9e6SYuval Mintz return rc; 2724fe56b9e6SYuval Mintz 2725fe56b9e6SYuval Mintz if (qed_mcp_is_init(p_hwfn)) 2726fe56b9e6SYuval Mintz ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, 2727fe56b9e6SYuval Mintz p_hwfn->mcp_info->func_info.mac); 2728fe56b9e6SYuval Mintz else 2729fe56b9e6SYuval Mintz eth_random_addr(p_hwfn->hw_info.hw_mac_addr); 2730fe56b9e6SYuval Mintz 2731fe56b9e6SYuval Mintz if (qed_mcp_is_init(p_hwfn)) { 2732fe56b9e6SYuval Mintz if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) 2733fe56b9e6SYuval Mintz p_hwfn->hw_info.ovlan = 2734fe56b9e6SYuval Mintz p_hwfn->mcp_info->func_info.ovlan; 2735fe56b9e6SYuval Mintz 2736fe56b9e6SYuval Mintz qed_mcp_cmd_port_init(p_hwfn, p_ptt); 2737fe56b9e6SYuval Mintz } 2738fe56b9e6SYuval Mintz 2739fe56b9e6SYuval Mintz if (qed_mcp_is_init(p_hwfn)) { 2740fe56b9e6SYuval Mintz enum qed_pci_personality protocol; 2741fe56b9e6SYuval Mintz 2742fe56b9e6SYuval Mintz protocol = p_hwfn->mcp_info->func_info.protocol; 2743fe56b9e6SYuval Mintz p_hwfn->hw_info.personality = protocol; 2744fe56b9e6SYuval Mintz } 2745fe56b9e6SYuval Mintz 2746b5a9ee7cSAriel Elior p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 2747b5a9ee7cSAriel Elior p_hwfn->hw_info.num_active_tc = 1; 2748b5a9ee7cSAriel Elior 27491408cc1fSYuval Mintz qed_get_num_funcs(p_hwfn, p_ptt); 27501408cc1fSYuval Mintz 27510fefbfbaSSudarsana Kalluru if (qed_mcp_is_init(p_hwfn)) 27520fefbfbaSSudarsana Kalluru p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 27530fefbfbaSSudarsana Kalluru 27549c8517c4STomer Tayar return qed_hw_get_resc(p_hwfn, p_ptt); 2755fe56b9e6SYuval Mintz } 2756fe56b9e6SYuval Mintz 275715582962SRahul Verma static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2758fe56b9e6SYuval 
Mintz { 275915582962SRahul Verma struct qed_dev *cdev = p_hwfn->cdev; 27609c79ddaaSMintz, Yuval u16 device_id_mask; 2761fe56b9e6SYuval Mintz u32 tmp; 2762fe56b9e6SYuval Mintz 2763fc48b7a6SYuval Mintz /* Read Vendor Id / Device Id */ 27641a635e48SYuval Mintz pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); 27651a635e48SYuval Mintz pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); 27661a635e48SYuval Mintz 27679c79ddaaSMintz, Yuval /* Determine type */ 27689c79ddaaSMintz, Yuval device_id_mask = cdev->device_id & QED_DEV_ID_MASK; 27699c79ddaaSMintz, Yuval switch (device_id_mask) { 27709c79ddaaSMintz, Yuval case QED_DEV_ID_MASK_BB: 27719c79ddaaSMintz, Yuval cdev->type = QED_DEV_TYPE_BB; 27729c79ddaaSMintz, Yuval break; 27739c79ddaaSMintz, Yuval case QED_DEV_ID_MASK_AH: 27749c79ddaaSMintz, Yuval cdev->type = QED_DEV_TYPE_AH; 27759c79ddaaSMintz, Yuval break; 27769c79ddaaSMintz, Yuval default: 27779c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); 27789c79ddaaSMintz, Yuval return -EBUSY; 27799c79ddaaSMintz, Yuval } 27809c79ddaaSMintz, Yuval 278115582962SRahul Verma cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 278215582962SRahul Verma cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 278315582962SRahul Verma 2784fe56b9e6SYuval Mintz MASK_FIELD(CHIP_REV, cdev->chip_rev); 2785fe56b9e6SYuval Mintz 2786fe56b9e6SYuval Mintz /* Learn number of HW-functions */ 278715582962SRahul Verma tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 2788fe56b9e6SYuval Mintz 2789fc48b7a6SYuval Mintz if (tmp & (1 << p_hwfn->rel_pf_id)) { 2790fe56b9e6SYuval Mintz DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); 2791fe56b9e6SYuval Mintz cdev->num_hwfns = 2; 2792fe56b9e6SYuval Mintz } else { 2793fe56b9e6SYuval Mintz cdev->num_hwfns = 1; 2794fe56b9e6SYuval Mintz } 2795fe56b9e6SYuval Mintz 279615582962SRahul Verma cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, 2797fe56b9e6SYuval Mintz 
MISCS_REG_CHIP_TEST_REG) >> 4; 2798fe56b9e6SYuval Mintz MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); 279915582962SRahul Verma cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 2800fe56b9e6SYuval Mintz MASK_FIELD(CHIP_METAL, cdev->chip_metal); 2801fe56b9e6SYuval Mintz 2802fe56b9e6SYuval Mintz DP_INFO(cdev->hwfns, 28039c79ddaaSMintz, Yuval "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 28049c79ddaaSMintz, Yuval QED_IS_BB(cdev) ? "BB" : "AH", 28059c79ddaaSMintz, Yuval 'A' + cdev->chip_rev, 28069c79ddaaSMintz, Yuval (int)cdev->chip_metal, 2807fe56b9e6SYuval Mintz cdev->chip_num, cdev->chip_rev, 2808fe56b9e6SYuval Mintz cdev->chip_bond_id, cdev->chip_metal); 280912e09c69SYuval Mintz 281012e09c69SYuval Mintz if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { 281112e09c69SYuval Mintz DP_NOTICE(cdev->hwfns, 281212e09c69SYuval Mintz "The chip type/rev (BB A0) is not supported!\n"); 281312e09c69SYuval Mintz return -EINVAL; 281412e09c69SYuval Mintz } 281512e09c69SYuval Mintz 281612e09c69SYuval Mintz return 0; 2817fe56b9e6SYuval Mintz } 2818fe56b9e6SYuval Mintz 2819fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, 2820fe56b9e6SYuval Mintz void __iomem *p_regview, 2821fe56b9e6SYuval Mintz void __iomem *p_doorbells, 2822fe56b9e6SYuval Mintz enum qed_pci_personality personality) 2823fe56b9e6SYuval Mintz { 2824fe56b9e6SYuval Mintz int rc = 0; 2825fe56b9e6SYuval Mintz 2826fe56b9e6SYuval Mintz /* Split PCI bars evenly between hwfns */ 2827fe56b9e6SYuval Mintz p_hwfn->regview = p_regview; 2828fe56b9e6SYuval Mintz p_hwfn->doorbells = p_doorbells; 2829fe56b9e6SYuval Mintz 28301408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 28311408cc1fSYuval Mintz return qed_vf_hw_prepare(p_hwfn); 28321408cc1fSYuval Mintz 2833fe56b9e6SYuval Mintz /* Validate that chip access is feasible */ 2834fe56b9e6SYuval Mintz if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 2835fe56b9e6SYuval Mintz DP_ERR(p_hwfn, 2836fe56b9e6SYuval 
Mintz "Reading the ME register returns all Fs; Preventing further chip access\n"); 2837fe56b9e6SYuval Mintz return -EINVAL; 2838fe56b9e6SYuval Mintz } 2839fe56b9e6SYuval Mintz 2840fe56b9e6SYuval Mintz get_function_id(p_hwfn); 2841fe56b9e6SYuval Mintz 284212e09c69SYuval Mintz /* Allocate PTT pool */ 284312e09c69SYuval Mintz rc = qed_ptt_pool_alloc(p_hwfn); 28442591c280SJoe Perches if (rc) 2845fe56b9e6SYuval Mintz goto err0; 2846fe56b9e6SYuval Mintz 284712e09c69SYuval Mintz /* Allocate the main PTT */ 284812e09c69SYuval Mintz p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 284912e09c69SYuval Mintz 2850fe56b9e6SYuval Mintz /* First hwfn learns basic information, e.g., number of hwfns */ 285112e09c69SYuval Mintz if (!p_hwfn->my_id) { 285215582962SRahul Verma rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 28531a635e48SYuval Mintz if (rc) 285412e09c69SYuval Mintz goto err1; 285512e09c69SYuval Mintz } 285612e09c69SYuval Mintz 285712e09c69SYuval Mintz qed_hw_hwfn_prepare(p_hwfn); 2858fe56b9e6SYuval Mintz 2859fe56b9e6SYuval Mintz /* Initialize MCP structure */ 2860fe56b9e6SYuval Mintz rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 2861fe56b9e6SYuval Mintz if (rc) { 2862fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); 2863fe56b9e6SYuval Mintz goto err1; 2864fe56b9e6SYuval Mintz } 2865fe56b9e6SYuval Mintz 2866fe56b9e6SYuval Mintz /* Read the device configuration information from the HW and SHMEM */ 2867fe56b9e6SYuval Mintz rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); 2868fe56b9e6SYuval Mintz if (rc) { 2869fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Failed to get HW information\n"); 2870fe56b9e6SYuval Mintz goto err2; 2871fe56b9e6SYuval Mintz } 2872fe56b9e6SYuval Mintz 287318a69e36SMintz, Yuval /* Sending a mailbox to the MFW should be done after qed_get_hw_info() 287418a69e36SMintz, Yuval * is called as it sets the ports number in an engine. 
287518a69e36SMintz, Yuval */ 287618a69e36SMintz, Yuval if (IS_LEAD_HWFN(p_hwfn)) { 287718a69e36SMintz, Yuval rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 287818a69e36SMintz, Yuval if (rc) 287918a69e36SMintz, Yuval DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); 288018a69e36SMintz, Yuval } 288118a69e36SMintz, Yuval 2882fe56b9e6SYuval Mintz /* Allocate the init RT array and initialize the init-ops engine */ 2883fe56b9e6SYuval Mintz rc = qed_init_alloc(p_hwfn); 28842591c280SJoe Perches if (rc) 2885fe56b9e6SYuval Mintz goto err2; 2886fe56b9e6SYuval Mintz 2887fe56b9e6SYuval Mintz return rc; 2888fe56b9e6SYuval Mintz err2: 288932a47e72SYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 289032a47e72SYuval Mintz qed_iov_free_hw_info(p_hwfn->cdev); 2891fe56b9e6SYuval Mintz qed_mcp_free(p_hwfn); 2892fe56b9e6SYuval Mintz err1: 2893fe56b9e6SYuval Mintz qed_hw_hwfn_free(p_hwfn); 2894fe56b9e6SYuval Mintz err0: 2895fe56b9e6SYuval Mintz return rc; 2896fe56b9e6SYuval Mintz } 2897fe56b9e6SYuval Mintz 2898fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev, 2899fe56b9e6SYuval Mintz int personality) 2900fe56b9e6SYuval Mintz { 2901c78df14eSAriel Elior struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2902c78df14eSAriel Elior int rc; 2903fe56b9e6SYuval Mintz 2904fe56b9e6SYuval Mintz /* Store the precompiled init data ptrs */ 29051408cc1fSYuval Mintz if (IS_PF(cdev)) 2906fe56b9e6SYuval Mintz qed_init_iro_array(cdev); 2907fe56b9e6SYuval Mintz 2908fe56b9e6SYuval Mintz /* Initialize the first hwfn - will learn number of hwfns */ 2909c78df14eSAriel Elior rc = qed_hw_prepare_single(p_hwfn, 2910c78df14eSAriel Elior cdev->regview, 2911fe56b9e6SYuval Mintz cdev->doorbells, personality); 2912fe56b9e6SYuval Mintz if (rc) 2913fe56b9e6SYuval Mintz return rc; 2914fe56b9e6SYuval Mintz 2915c78df14eSAriel Elior personality = p_hwfn->hw_info.personality; 2916fe56b9e6SYuval Mintz 2917fe56b9e6SYuval Mintz /* Initialize the rest of the hwfns */ 2918c78df14eSAriel Elior if (cdev->num_hwfns > 1) { 
2919fe56b9e6SYuval Mintz void __iomem *p_regview, *p_doorbell; 2920c78df14eSAriel Elior u8 __iomem *addr; 2921fe56b9e6SYuval Mintz 2922c78df14eSAriel Elior /* adjust bar offset for second engine */ 292315582962SRahul Verma addr = cdev->regview + 292415582962SRahul Verma qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 292515582962SRahul Verma BAR_ID_0) / 2; 2926c78df14eSAriel Elior p_regview = addr; 2927c78df14eSAriel Elior 292815582962SRahul Verma addr = cdev->doorbells + 292915582962SRahul Verma qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 293015582962SRahul Verma BAR_ID_1) / 2; 2931c78df14eSAriel Elior p_doorbell = addr; 2932c78df14eSAriel Elior 2933c78df14eSAriel Elior /* prepare second hw function */ 2934c78df14eSAriel Elior rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, 2935fe56b9e6SYuval Mintz p_doorbell, personality); 2936c78df14eSAriel Elior 2937c78df14eSAriel Elior /* in case of error, need to free the previously 2938c78df14eSAriel Elior * initiliazed hwfn 0. 2939c78df14eSAriel Elior */ 2940fe56b9e6SYuval Mintz if (rc) { 29411408cc1fSYuval Mintz if (IS_PF(cdev)) { 2942c78df14eSAriel Elior qed_init_free(p_hwfn); 2943c78df14eSAriel Elior qed_mcp_free(p_hwfn); 2944c78df14eSAriel Elior qed_hw_hwfn_free(p_hwfn); 2945fe56b9e6SYuval Mintz } 2946fe56b9e6SYuval Mintz } 29471408cc1fSYuval Mintz } 2948fe56b9e6SYuval Mintz 2949c78df14eSAriel Elior return rc; 2950fe56b9e6SYuval Mintz } 2951fe56b9e6SYuval Mintz 2952fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev) 2953fe56b9e6SYuval Mintz { 29540fefbfbaSSudarsana Kalluru struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2955fe56b9e6SYuval Mintz int i; 2956fe56b9e6SYuval Mintz 29570fefbfbaSSudarsana Kalluru if (IS_PF(cdev)) 29580fefbfbaSSudarsana Kalluru qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 29590fefbfbaSSudarsana Kalluru QED_OV_DRIVER_STATE_NOT_LOADED); 29600fefbfbaSSudarsana Kalluru 2961fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 2962fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = 
&cdev->hwfns[i]; 2963fe56b9e6SYuval Mintz 29641408cc1fSYuval Mintz if (IS_VF(cdev)) { 29650b55e27dSYuval Mintz qed_vf_pf_release(p_hwfn); 29661408cc1fSYuval Mintz continue; 29671408cc1fSYuval Mintz } 29681408cc1fSYuval Mintz 2969fe56b9e6SYuval Mintz qed_init_free(p_hwfn); 2970fe56b9e6SYuval Mintz qed_hw_hwfn_free(p_hwfn); 2971fe56b9e6SYuval Mintz qed_mcp_free(p_hwfn); 2972fe56b9e6SYuval Mintz } 297332a47e72SYuval Mintz 297432a47e72SYuval Mintz qed_iov_free_hw_info(cdev); 2975fe56b9e6SYuval Mintz } 2976fe56b9e6SYuval Mintz 2977a91eb52aSYuval Mintz static void qed_chain_free_next_ptr(struct qed_dev *cdev, 2978a91eb52aSYuval Mintz struct qed_chain *p_chain) 2979a91eb52aSYuval Mintz { 2980a91eb52aSYuval Mintz void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; 2981a91eb52aSYuval Mintz dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 2982a91eb52aSYuval Mintz struct qed_chain_next *p_next; 2983a91eb52aSYuval Mintz u32 size, i; 2984a91eb52aSYuval Mintz 2985a91eb52aSYuval Mintz if (!p_virt) 2986a91eb52aSYuval Mintz return; 2987a91eb52aSYuval Mintz 2988a91eb52aSYuval Mintz size = p_chain->elem_size * p_chain->usable_per_page; 2989a91eb52aSYuval Mintz 2990a91eb52aSYuval Mintz for (i = 0; i < p_chain->page_cnt; i++) { 2991a91eb52aSYuval Mintz if (!p_virt) 2992a91eb52aSYuval Mintz break; 2993a91eb52aSYuval Mintz 2994a91eb52aSYuval Mintz p_next = (struct qed_chain_next *)((u8 *)p_virt + size); 2995a91eb52aSYuval Mintz p_virt_next = p_next->next_virt; 2996a91eb52aSYuval Mintz p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 2997a91eb52aSYuval Mintz 2998a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 2999a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, p_virt, p_phys); 3000a91eb52aSYuval Mintz 3001a91eb52aSYuval Mintz p_virt = p_virt_next; 3002a91eb52aSYuval Mintz p_phys = p_phys_next; 3003a91eb52aSYuval Mintz } 3004a91eb52aSYuval Mintz } 3005a91eb52aSYuval Mintz 3006a91eb52aSYuval Mintz static void qed_chain_free_single(struct qed_dev *cdev, 
3007a91eb52aSYuval Mintz struct qed_chain *p_chain) 3008a91eb52aSYuval Mintz { 3009a91eb52aSYuval Mintz if (!p_chain->p_virt_addr) 3010a91eb52aSYuval Mintz return; 3011a91eb52aSYuval Mintz 3012a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3013a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3014a91eb52aSYuval Mintz p_chain->p_virt_addr, p_chain->p_phys_addr); 3015a91eb52aSYuval Mintz } 3016a91eb52aSYuval Mintz 3017a91eb52aSYuval Mintz static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 3018a91eb52aSYuval Mintz { 3019a91eb52aSYuval Mintz void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 3020a91eb52aSYuval Mintz u32 page_cnt = p_chain->page_cnt, i, pbl_size; 30216d937acfSMintz, Yuval u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; 3022a91eb52aSYuval Mintz 3023a91eb52aSYuval Mintz if (!pp_virt_addr_tbl) 3024a91eb52aSYuval Mintz return; 3025a91eb52aSYuval Mintz 30266d937acfSMintz, Yuval if (!p_pbl_virt) 3027a91eb52aSYuval Mintz goto out; 3028a91eb52aSYuval Mintz 3029a91eb52aSYuval Mintz for (i = 0; i < page_cnt; i++) { 3030a91eb52aSYuval Mintz if (!pp_virt_addr_tbl[i]) 3031a91eb52aSYuval Mintz break; 3032a91eb52aSYuval Mintz 3033a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3034a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3035a91eb52aSYuval Mintz pp_virt_addr_tbl[i], 3036a91eb52aSYuval Mintz *(dma_addr_t *)p_pbl_virt); 3037a91eb52aSYuval Mintz 3038a91eb52aSYuval Mintz p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 3039a91eb52aSYuval Mintz } 3040a91eb52aSYuval Mintz 3041a91eb52aSYuval Mintz pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 3042a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3043a91eb52aSYuval Mintz pbl_size, 30446d937acfSMintz, Yuval p_chain->pbl_sp.p_virt_table, 30456d937acfSMintz, Yuval p_chain->pbl_sp.p_phys_table); 3046a91eb52aSYuval Mintz out: 3047a91eb52aSYuval Mintz vfree(p_chain->pbl.pp_virt_addr_tbl); 3048a91eb52aSYuval Mintz } 3049a91eb52aSYuval Mintz 3050a91eb52aSYuval Mintz void 
qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) 3051a91eb52aSYuval Mintz { 3052a91eb52aSYuval Mintz switch (p_chain->mode) { 3053a91eb52aSYuval Mintz case QED_CHAIN_MODE_NEXT_PTR: 3054a91eb52aSYuval Mintz qed_chain_free_next_ptr(cdev, p_chain); 3055a91eb52aSYuval Mintz break; 3056a91eb52aSYuval Mintz case QED_CHAIN_MODE_SINGLE: 3057a91eb52aSYuval Mintz qed_chain_free_single(cdev, p_chain); 3058a91eb52aSYuval Mintz break; 3059a91eb52aSYuval Mintz case QED_CHAIN_MODE_PBL: 3060a91eb52aSYuval Mintz qed_chain_free_pbl(cdev, p_chain); 3061a91eb52aSYuval Mintz break; 3062a91eb52aSYuval Mintz } 3063a91eb52aSYuval Mintz } 3064a91eb52aSYuval Mintz 3065a91eb52aSYuval Mintz static int 3066a91eb52aSYuval Mintz qed_chain_alloc_sanity_check(struct qed_dev *cdev, 3067a91eb52aSYuval Mintz enum qed_chain_cnt_type cnt_type, 3068a91eb52aSYuval Mintz size_t elem_size, u32 page_cnt) 3069a91eb52aSYuval Mintz { 3070a91eb52aSYuval Mintz u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 3071a91eb52aSYuval Mintz 3072a91eb52aSYuval Mintz /* The actual chain size can be larger than the maximal possible value 3073a91eb52aSYuval Mintz * after rounding up the requested elements number to pages, and after 3074a91eb52aSYuval Mintz * taking into acount the unusuable elements (next-ptr elements). 3075a91eb52aSYuval Mintz * The size of a "u16" chain can be (U16_MAX + 1) since the chain 3076a91eb52aSYuval Mintz * size/capacity fields are of a u32 type. 
3077a91eb52aSYuval Mintz */ 3078a91eb52aSYuval Mintz if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && 30793ef310a7STomer Tayar chain_size > ((u32)U16_MAX + 1)) || 30803ef310a7STomer Tayar (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { 3081a91eb52aSYuval Mintz DP_NOTICE(cdev, 3082a91eb52aSYuval Mintz "The actual chain size (0x%llx) is larger than the maximal possible value\n", 3083a91eb52aSYuval Mintz chain_size); 3084a91eb52aSYuval Mintz return -EINVAL; 3085a91eb52aSYuval Mintz } 3086a91eb52aSYuval Mintz 3087a91eb52aSYuval Mintz return 0; 3088a91eb52aSYuval Mintz } 3089a91eb52aSYuval Mintz 3090a91eb52aSYuval Mintz static int 3091a91eb52aSYuval Mintz qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) 3092a91eb52aSYuval Mintz { 3093a91eb52aSYuval Mintz void *p_virt = NULL, *p_virt_prev = NULL; 3094a91eb52aSYuval Mintz dma_addr_t p_phys = 0; 3095a91eb52aSYuval Mintz u32 i; 3096a91eb52aSYuval Mintz 3097a91eb52aSYuval Mintz for (i = 0; i < p_chain->page_cnt; i++) { 3098a91eb52aSYuval Mintz p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3099a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3100a91eb52aSYuval Mintz &p_phys, GFP_KERNEL); 31012591c280SJoe Perches if (!p_virt) 3102a91eb52aSYuval Mintz return -ENOMEM; 3103a91eb52aSYuval Mintz 3104a91eb52aSYuval Mintz if (i == 0) { 3105a91eb52aSYuval Mintz qed_chain_init_mem(p_chain, p_virt, p_phys); 3106a91eb52aSYuval Mintz qed_chain_reset(p_chain); 3107a91eb52aSYuval Mintz } else { 3108a91eb52aSYuval Mintz qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 3109a91eb52aSYuval Mintz p_virt, p_phys); 3110a91eb52aSYuval Mintz } 3111a91eb52aSYuval Mintz 3112a91eb52aSYuval Mintz p_virt_prev = p_virt; 3113a91eb52aSYuval Mintz } 3114a91eb52aSYuval Mintz /* Last page's next element should point to the beginning of the 3115a91eb52aSYuval Mintz * chain. 
3116a91eb52aSYuval Mintz */ 3117a91eb52aSYuval Mintz qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 3118a91eb52aSYuval Mintz p_chain->p_virt_addr, 3119a91eb52aSYuval Mintz p_chain->p_phys_addr); 3120a91eb52aSYuval Mintz 3121a91eb52aSYuval Mintz return 0; 3122a91eb52aSYuval Mintz } 3123a91eb52aSYuval Mintz 3124a91eb52aSYuval Mintz static int 3125a91eb52aSYuval Mintz qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) 3126a91eb52aSYuval Mintz { 3127a91eb52aSYuval Mintz dma_addr_t p_phys = 0; 3128a91eb52aSYuval Mintz void *p_virt = NULL; 3129a91eb52aSYuval Mintz 3130a91eb52aSYuval Mintz p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3131a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); 31322591c280SJoe Perches if (!p_virt) 3133a91eb52aSYuval Mintz return -ENOMEM; 3134a91eb52aSYuval Mintz 3135a91eb52aSYuval Mintz qed_chain_init_mem(p_chain, p_virt, p_phys); 3136a91eb52aSYuval Mintz qed_chain_reset(p_chain); 3137a91eb52aSYuval Mintz 3138a91eb52aSYuval Mintz return 0; 3139a91eb52aSYuval Mintz } 3140a91eb52aSYuval Mintz 3141a91eb52aSYuval Mintz static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 3142a91eb52aSYuval Mintz { 3143a91eb52aSYuval Mintz u32 page_cnt = p_chain->page_cnt, size, i; 3144a91eb52aSYuval Mintz dma_addr_t p_phys = 0, p_pbl_phys = 0; 3145a91eb52aSYuval Mintz void **pp_virt_addr_tbl = NULL; 3146a91eb52aSYuval Mintz u8 *p_pbl_virt = NULL; 3147a91eb52aSYuval Mintz void *p_virt = NULL; 3148a91eb52aSYuval Mintz 3149a91eb52aSYuval Mintz size = page_cnt * sizeof(*pp_virt_addr_tbl); 31502591c280SJoe Perches pp_virt_addr_tbl = vzalloc(size); 31512591c280SJoe Perches if (!pp_virt_addr_tbl) 3152a91eb52aSYuval Mintz return -ENOMEM; 3153a91eb52aSYuval Mintz 3154a91eb52aSYuval Mintz /* The allocation of the PBL table is done with its full size, since it 3155a91eb52aSYuval Mintz * is expected to be successive. 
3156a91eb52aSYuval Mintz * qed_chain_init_pbl_mem() is called even in a case of an allocation 3157a91eb52aSYuval Mintz * failure, since pp_virt_addr_tbl was previously allocated, and it 3158a91eb52aSYuval Mintz * should be saved to allow its freeing during the error flow. 3159a91eb52aSYuval Mintz */ 3160a91eb52aSYuval Mintz size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 3161a91eb52aSYuval Mintz p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, 3162a91eb52aSYuval Mintz size, &p_pbl_phys, GFP_KERNEL); 3163a91eb52aSYuval Mintz qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 3164a91eb52aSYuval Mintz pp_virt_addr_tbl); 31652591c280SJoe Perches if (!p_pbl_virt) 3166a91eb52aSYuval Mintz return -ENOMEM; 3167a91eb52aSYuval Mintz 3168a91eb52aSYuval Mintz for (i = 0; i < page_cnt; i++) { 3169a91eb52aSYuval Mintz p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3170a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3171a91eb52aSYuval Mintz &p_phys, GFP_KERNEL); 31722591c280SJoe Perches if (!p_virt) 3173a91eb52aSYuval Mintz return -ENOMEM; 3174a91eb52aSYuval Mintz 3175a91eb52aSYuval Mintz if (i == 0) { 3176a91eb52aSYuval Mintz qed_chain_init_mem(p_chain, p_virt, p_phys); 3177a91eb52aSYuval Mintz qed_chain_reset(p_chain); 3178a91eb52aSYuval Mintz } 3179a91eb52aSYuval Mintz 3180a91eb52aSYuval Mintz /* Fill the PBL table with the physical address of the page */ 3181a91eb52aSYuval Mintz *(dma_addr_t *)p_pbl_virt = p_phys; 3182a91eb52aSYuval Mintz /* Keep the virtual address of the page */ 3183a91eb52aSYuval Mintz p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 3184a91eb52aSYuval Mintz 3185a91eb52aSYuval Mintz p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 3186a91eb52aSYuval Mintz } 3187a91eb52aSYuval Mintz 3188a91eb52aSYuval Mintz return 0; 3189a91eb52aSYuval Mintz } 3190a91eb52aSYuval Mintz 3191fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev, 3192fe56b9e6SYuval Mintz enum qed_chain_use_mode intended_use, 3193fe56b9e6SYuval Mintz enum qed_chain_mode mode, 3194a91eb52aSYuval Mintz enum 
qed_chain_cnt_type cnt_type, 3195a91eb52aSYuval Mintz u32 num_elems, size_t elem_size, struct qed_chain *p_chain) 3196fe56b9e6SYuval Mintz { 3197a91eb52aSYuval Mintz u32 page_cnt; 3198a91eb52aSYuval Mintz int rc = 0; 3199fe56b9e6SYuval Mintz 3200fe56b9e6SYuval Mintz if (mode == QED_CHAIN_MODE_SINGLE) 3201fe56b9e6SYuval Mintz page_cnt = 1; 3202fe56b9e6SYuval Mintz else 3203fe56b9e6SYuval Mintz page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 3204fe56b9e6SYuval Mintz 3205a91eb52aSYuval Mintz rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); 3206a91eb52aSYuval Mintz if (rc) { 3207a91eb52aSYuval Mintz DP_NOTICE(cdev, 32082591c280SJoe Perches "Cannot allocate a chain with the given arguments:\n"); 32092591c280SJoe Perches DP_NOTICE(cdev, 3210a91eb52aSYuval Mintz "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 3211a91eb52aSYuval Mintz intended_use, mode, cnt_type, num_elems, elem_size); 3212a91eb52aSYuval Mintz return rc; 3213fe56b9e6SYuval Mintz } 3214fe56b9e6SYuval Mintz 3215a91eb52aSYuval Mintz qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, 3216a91eb52aSYuval Mintz mode, cnt_type); 3217fe56b9e6SYuval Mintz 3218a91eb52aSYuval Mintz switch (mode) { 3219a91eb52aSYuval Mintz case QED_CHAIN_MODE_NEXT_PTR: 3220a91eb52aSYuval Mintz rc = qed_chain_alloc_next_ptr(cdev, p_chain); 3221a91eb52aSYuval Mintz break; 3222a91eb52aSYuval Mintz case QED_CHAIN_MODE_SINGLE: 3223a91eb52aSYuval Mintz rc = qed_chain_alloc_single(cdev, p_chain); 3224a91eb52aSYuval Mintz break; 3225a91eb52aSYuval Mintz case QED_CHAIN_MODE_PBL: 3226a91eb52aSYuval Mintz rc = qed_chain_alloc_pbl(cdev, p_chain); 3227a91eb52aSYuval Mintz break; 3228fe56b9e6SYuval Mintz } 3229a91eb52aSYuval Mintz if (rc) 3230a91eb52aSYuval Mintz goto nomem; 3231fe56b9e6SYuval Mintz 3232fe56b9e6SYuval Mintz return 0; 3233fe56b9e6SYuval Mintz 3234fe56b9e6SYuval Mintz nomem: 3235a91eb52aSYuval Mintz qed_chain_free(cdev, p_chain); 3236a91eb52aSYuval Mintz 
	return rc;
}

/* Translate a PF-relative L2 queue index into its absolute (engine-wide)
 * queue id, based on the L2-queue resources allocated to this function.
 * Fills @dst_id and returns 0, or -EINVAL if @src_id is out of range.
 */
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
		/* NOTE: 'max' here is one past the last valid index */
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}

/* Translate a PF-relative vport index into its absolute vport id. */
int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}

/* Translate a PF-relative RSS engine index into its absolute id. */
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}

/* Pack a 6-byte MAC address into the two 32-bit words expected by the
 * NIG LLH filter RAM: bytes 0-1 go into *p_high, bytes 2-5 into *p_low.
 */
static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
				  u8 *p_filter)
{
	*p_high = p_filter[1] | (p_filter[0] << 8);
	*p_low = p_filter[5] | (p_filter[4] << 8) |
		 (p_filter[3] << 16) | (p_filter[2] << 24);
}

/* Install the MAC address @p_filter in the first free NIG LLH filter
 * entry. Only done in default/SI multi-function modes (silently succeeds
 * otherwise). Returns -EINVAL if all filter entries are already in use.
 */
int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		/* Each entry owns two consecutive VALUE registers: low, high */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		/* MODE 0 with no protocol type - MAC filtering; enable last */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "mac: %pM is added at %d\n",
		   p_filter, i);

	return 0;
}
/* Remove the NIG LLH filter entry whose value matches the MAC address
 * @p_filter. Counterpart of qed_llh_add_mac_filter(); a no-op outside
 * default/SI multi-function modes.
 */
void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		/* Disable first, then clear both value words */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "mac: %pM is removed from %d\n",
			   p_filter, i);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}

/* Install a protocol LLH filter (ethertype, or TCP/UDP src/dst port)
 * in the first free NIG LLH filter entry. Only done in default/SI
 * multi-function modes (silently succeeds otherwise).
 * Returns -EINVAL on an unknown @type or when all entries are in use.
 */
int
qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u16 source_port_or_eth_type,
			    u16 dest_port, enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	/* Encode the filter value by type: ethertype goes in the high word,
	 * source port in low[31:16], destination port in low[15:0].
	 */
	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Non valid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}
	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		/* MODE 1 - protocol filtering; the type is a bit in
		 * the PROTOCOL_TYPE register. Enable last.
		 */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 1 << type);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}
	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "ETH type %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP src port %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_UDP_SRC_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP src port %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP dst port %x is added at %d\n", dest_port, i);
		break;
	case QED_LLH_FILTER_UDP_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP dst port %x is added at %d\n", dest_port, i);
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, i);
		break;
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, i);
		break;
	}
	return 0;
}

/* Remove the protocol LLH filter matching the given type and value(s).
 * An entry only matches if it is enabled, in protocol mode, carries the
 * @type bit, and both value words match. Counterpart of
 * qed_llh_add_protocol_filter(); a no-op outside default/SI MF modes.
 */
void
qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 source_port_or_eth_type,
			       u16 dest_port,
			       enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	/* Re-derive the value words exactly as the add path did */
	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Non valid LLH protocol filter type %d\n", type);
		return;
	}

	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
			continue;
		if (!qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
			continue;
		if (!(qed_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
			     i * sizeof(u32)) & BIT(type)))
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		/* Disable first, then clear mode, type and both value words */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}
/* Encode @timeset into a coalescing_timeset and copy it to @hw_addr in
 * storm RAM. @p_eth_qzone is a caller-provided queue-zone buffer of
 * @eth_qzone_size bytes used as scratch for the copy.
 * Returns -EINVAL if interrupt coalescing is not enabled on the device.
 */
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	p_coal_timeset = p_eth_qzone;
	memset(p_coal_timeset, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}

/* Configure Rx interrupt coalescing for queue @qid / status block @sb_id.
 * @coalesce is split into a timer resolution and a 7-bit timeset such
 * that coalesce == timeset << timer_res; values above 0x1FF are rejected.
 * On success caches the value in cdev->rx_coalesce_usecs.
 */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u16 qid, u16 sb_id)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
	return rc;
}

/* Tx counterpart of qed_set_rxq_coalesce(): same timeset encoding, but
 * targets the Xstorm queue zone and caches into cdev->tx_coalesce_usecs.
 */
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u16 qid, u16 sb_id)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
	return rc;
}
Reddy Kalluru } 3662722003acSSudarsana Reddy Kalluru 3663bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them. 3664bcd197c8SManish Chopra * After this configuration each vport will have 3665bcd197c8SManish Chopra * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) 3666bcd197c8SManish Chopra */ 3667bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, 3668bcd197c8SManish Chopra struct qed_ptt *p_ptt, 3669bcd197c8SManish Chopra u32 min_pf_rate) 3670bcd197c8SManish Chopra { 3671bcd197c8SManish Chopra struct init_qm_vport_params *vport_params; 3672bcd197c8SManish Chopra int i; 3673bcd197c8SManish Chopra 3674bcd197c8SManish Chopra vport_params = p_hwfn->qm_info.qm_vport_params; 3675bcd197c8SManish Chopra 3676bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 3677bcd197c8SManish Chopra u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 3678bcd197c8SManish Chopra 3679bcd197c8SManish Chopra vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) / 3680bcd197c8SManish Chopra min_pf_rate; 3681bcd197c8SManish Chopra qed_init_vport_wfq(p_hwfn, p_ptt, 3682bcd197c8SManish Chopra vport_params[i].first_tx_pq_id, 3683bcd197c8SManish Chopra vport_params[i].vport_wfq); 3684bcd197c8SManish Chopra } 3685bcd197c8SManish Chopra } 3686bcd197c8SManish Chopra 3687bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, 3688bcd197c8SManish Chopra u32 min_pf_rate) 3689bcd197c8SManish Chopra 3690bcd197c8SManish Chopra { 3691bcd197c8SManish Chopra int i; 3692bcd197c8SManish Chopra 3693bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 3694bcd197c8SManish Chopra p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 3695bcd197c8SManish Chopra } 3696bcd197c8SManish Chopra 3697bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, 3698bcd197c8SManish Chopra struct qed_ptt *p_ptt, 3699bcd197c8SManish Chopra u32 
min_pf_rate) 3700bcd197c8SManish Chopra { 3701bcd197c8SManish Chopra struct init_qm_vport_params *vport_params; 3702bcd197c8SManish Chopra int i; 3703bcd197c8SManish Chopra 3704bcd197c8SManish Chopra vport_params = p_hwfn->qm_info.qm_vport_params; 3705bcd197c8SManish Chopra 3706bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 3707bcd197c8SManish Chopra qed_init_wfq_default_param(p_hwfn, min_pf_rate); 3708bcd197c8SManish Chopra qed_init_vport_wfq(p_hwfn, p_ptt, 3709bcd197c8SManish Chopra vport_params[i].first_tx_pq_id, 3710bcd197c8SManish Chopra vport_params[i].vport_wfq); 3711bcd197c8SManish Chopra } 3712bcd197c8SManish Chopra } 3713bcd197c8SManish Chopra 3714bcd197c8SManish Chopra /* This function performs several validations for WFQ 3715bcd197c8SManish Chopra * configuration and required min rate for a given vport 3716bcd197c8SManish Chopra * 1. req_rate must be greater than one percent of min_pf_rate. 3717bcd197c8SManish Chopra * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 3718bcd197c8SManish Chopra * rates to get less than one percent of min_pf_rate. 3719bcd197c8SManish Chopra * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
3720bcd197c8SManish Chopra */ 3721bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, 37221a635e48SYuval Mintz u16 vport_id, u32 req_rate, u32 min_pf_rate) 3723bcd197c8SManish Chopra { 3724bcd197c8SManish Chopra u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 3725bcd197c8SManish Chopra int non_requested_count = 0, req_count = 0, i, num_vports; 3726bcd197c8SManish Chopra 3727bcd197c8SManish Chopra num_vports = p_hwfn->qm_info.num_vports; 3728bcd197c8SManish Chopra 3729bcd197c8SManish Chopra /* Accounting for the vports which are configured for WFQ explicitly */ 3730bcd197c8SManish Chopra for (i = 0; i < num_vports; i++) { 3731bcd197c8SManish Chopra u32 tmp_speed; 3732bcd197c8SManish Chopra 3733bcd197c8SManish Chopra if ((i != vport_id) && 3734bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[i].configured) { 3735bcd197c8SManish Chopra req_count++; 3736bcd197c8SManish Chopra tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 3737bcd197c8SManish Chopra total_req_min_rate += tmp_speed; 3738bcd197c8SManish Chopra } 3739bcd197c8SManish Chopra } 3740bcd197c8SManish Chopra 3741bcd197c8SManish Chopra /* Include current vport data as well */ 3742bcd197c8SManish Chopra req_count++; 3743bcd197c8SManish Chopra total_req_min_rate += req_rate; 3744bcd197c8SManish Chopra non_requested_count = num_vports - req_count; 3745bcd197c8SManish Chopra 3746bcd197c8SManish Chopra if (req_rate < min_pf_rate / QED_WFQ_UNIT) { 3747bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3748bcd197c8SManish Chopra "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 3749bcd197c8SManish Chopra vport_id, req_rate, min_pf_rate); 3750bcd197c8SManish Chopra return -EINVAL; 3751bcd197c8SManish Chopra } 3752bcd197c8SManish Chopra 3753bcd197c8SManish Chopra if (num_vports > QED_WFQ_UNIT) { 3754bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3755bcd197c8SManish Chopra "Number of vports is greater 
than %d\n", 3756bcd197c8SManish Chopra QED_WFQ_UNIT); 3757bcd197c8SManish Chopra return -EINVAL; 3758bcd197c8SManish Chopra } 3759bcd197c8SManish Chopra 3760bcd197c8SManish Chopra if (total_req_min_rate > min_pf_rate) { 3761bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3762bcd197c8SManish Chopra "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 3763bcd197c8SManish Chopra total_req_min_rate, min_pf_rate); 3764bcd197c8SManish Chopra return -EINVAL; 3765bcd197c8SManish Chopra } 3766bcd197c8SManish Chopra 3767bcd197c8SManish Chopra total_left_rate = min_pf_rate - total_req_min_rate; 3768bcd197c8SManish Chopra 3769bcd197c8SManish Chopra left_rate_per_vp = total_left_rate / non_requested_count; 3770bcd197c8SManish Chopra if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { 3771bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3772bcd197c8SManish Chopra "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 3773bcd197c8SManish Chopra left_rate_per_vp, min_pf_rate); 3774bcd197c8SManish Chopra return -EINVAL; 3775bcd197c8SManish Chopra } 3776bcd197c8SManish Chopra 3777bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 3778bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[vport_id].configured = true; 3779bcd197c8SManish Chopra 3780bcd197c8SManish Chopra for (i = 0; i < num_vports; i++) { 3781bcd197c8SManish Chopra if (p_hwfn->qm_info.wfq_data[i].configured) 3782bcd197c8SManish Chopra continue; 3783bcd197c8SManish Chopra 3784bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 3785bcd197c8SManish Chopra } 3786bcd197c8SManish Chopra 3787bcd197c8SManish Chopra return 0; 3788bcd197c8SManish Chopra } 3789bcd197c8SManish Chopra 3790733def6aSYuval Mintz static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, 3791733def6aSYuval Mintz struct qed_ptt *p_ptt, u16 vp_id, u32 rate) 3792733def6aSYuval Mintz 
/* Record a min-rate request for @vp_id. If no PF min rate is configured
 * yet the request is only cached; otherwise it is validated and WFQ is
 * reprogrammed for all vports.
 */
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)

{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	/* Link state is tracked on the first hwfn of the device */
	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (!rc)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}

/* Re-validate all previously configured vport min rates against a new
 * PF min rate; apply WFQ if everything validates, otherwise fall back
 * to disabling WFQ for all vports.
 */
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}

/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps needs to be assigned to a given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		/* PTT is released on both the error and success paths */
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
						      min_pf_rate);
	}
}

/* Apply a max-bandwidth percentage to one hwfn: scales the rate limiter
 * to line_speed * max_bw / 100, or to an effectively-unlimited value
 * when max_bw is 100 so Tx-switched traffic is not throttled.
 */
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		/* Link state comes from the leading hwfn */
		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}

/* Apply a min-bandwidth percentage to one hwfn: programs the PF WFQ
 * weight and derives min_pf_rate from the current line speed.
 */
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;
4001a64b02d5SManish Chopra 4002a64b02d5SManish Chopra if (min_bw < 1 || min_bw > 100) { 4003a64b02d5SManish Chopra DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); 4004a64b02d5SManish Chopra return rc; 4005a64b02d5SManish Chopra } 4006a64b02d5SManish Chopra 4007a64b02d5SManish Chopra for_each_hwfn(cdev, i) { 4008a64b02d5SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4009a64b02d5SManish Chopra struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); 4010a64b02d5SManish Chopra struct qed_mcp_link_state *p_link; 4011a64b02d5SManish Chopra struct qed_ptt *p_ptt; 4012a64b02d5SManish Chopra 4013a64b02d5SManish Chopra p_link = &p_lead->mcp_info->link_output; 4014a64b02d5SManish Chopra 4015a64b02d5SManish Chopra p_ptt = qed_ptt_acquire(p_hwfn); 4016a64b02d5SManish Chopra if (!p_ptt) 4017a64b02d5SManish Chopra return -EBUSY; 4018a64b02d5SManish Chopra 4019a64b02d5SManish Chopra rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, 4020a64b02d5SManish Chopra p_link, min_bw); 4021a64b02d5SManish Chopra if (rc) { 4022a64b02d5SManish Chopra qed_ptt_release(p_hwfn, p_ptt); 4023a64b02d5SManish Chopra return rc; 4024a64b02d5SManish Chopra } 4025a64b02d5SManish Chopra 4026a64b02d5SManish Chopra if (p_link->min_pf_rate) { 4027a64b02d5SManish Chopra u32 min_rate = p_link->min_pf_rate; 4028a64b02d5SManish Chopra 4029a64b02d5SManish Chopra rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, 4030a64b02d5SManish Chopra p_ptt, 4031a64b02d5SManish Chopra min_rate); 4032a64b02d5SManish Chopra } 4033a64b02d5SManish Chopra 4034a64b02d5SManish Chopra qed_ptt_release(p_hwfn, p_ptt); 4035a64b02d5SManish Chopra } 4036a64b02d5SManish Chopra 4037a64b02d5SManish Chopra return rc; 4038a64b02d5SManish Chopra } 4039733def6aSYuval Mintz 4040733def6aSYuval Mintz void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 4041733def6aSYuval Mintz { 4042733def6aSYuval Mintz struct qed_mcp_link_state *p_link; 4043733def6aSYuval Mintz 4044733def6aSYuval Mintz p_link = 
&p_hwfn->mcp_info->link_output; 4045733def6aSYuval Mintz 4046733def6aSYuval Mintz if (p_link->min_pf_rate) 4047733def6aSYuval Mintz qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, 4048733def6aSYuval Mintz p_link->min_pf_rate); 4049733def6aSYuval Mintz 4050733def6aSYuval Mintz memset(p_hwfn->qm_info.wfq_data, 0, 4051733def6aSYuval Mintz sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); 4052733def6aSYuval Mintz } 40539c79ddaaSMintz, Yuval 40549c79ddaaSMintz, Yuval int qed_device_num_engines(struct qed_dev *cdev) 40559c79ddaaSMintz, Yuval { 40569c79ddaaSMintz, Yuval return QED_IS_BB(cdev) ? 2 : 1; 40579c79ddaaSMintz, Yuval } 4058db82f70eSsudarsana.kalluru@cavium.com 4059db82f70eSsudarsana.kalluru@cavium.com static int qed_device_num_ports(struct qed_dev *cdev) 4060db82f70eSsudarsana.kalluru@cavium.com { 4061db82f70eSsudarsana.kalluru@cavium.com /* in CMT always only one port */ 4062db82f70eSsudarsana.kalluru@cavium.com if (cdev->num_hwfns > 1) 4063db82f70eSsudarsana.kalluru@cavium.com return 1; 4064db82f70eSsudarsana.kalluru@cavium.com 4065db82f70eSsudarsana.kalluru@cavium.com return cdev->num_ports_in_engines * qed_device_num_engines(cdev); 4066db82f70eSsudarsana.kalluru@cavium.com } 4067db82f70eSsudarsana.kalluru@cavium.com 4068db82f70eSsudarsana.kalluru@cavium.com int qed_device_get_port_id(struct qed_dev *cdev) 4069db82f70eSsudarsana.kalluru@cavium.com { 4070db82f70eSsudarsana.kalluru@cavium.com return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); 4071db82f70eSsudarsana.kalluru@cavium.com } 4072