1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver 2e8f1cb50SMintz, Yuval * Copyright (c) 2015-2017 QLogic Corporation 3fe56b9e6SYuval Mintz * 4e8f1cb50SMintz, Yuval * This software is available to you under a choice of one of two 5e8f1cb50SMintz, Yuval * licenses. You may choose to be licensed under the terms of the GNU 6e8f1cb50SMintz, Yuval * General Public License (GPL) Version 2, available from the file 7e8f1cb50SMintz, Yuval * COPYING in the main directory of this source tree, or the 8e8f1cb50SMintz, Yuval * OpenIB.org BSD license below: 9e8f1cb50SMintz, Yuval * 10e8f1cb50SMintz, Yuval * Redistribution and use in source and binary forms, with or 11e8f1cb50SMintz, Yuval * without modification, are permitted provided that the following 12e8f1cb50SMintz, Yuval * conditions are met: 13e8f1cb50SMintz, Yuval * 14e8f1cb50SMintz, Yuval * - Redistributions of source code must retain the above 15e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 16e8f1cb50SMintz, Yuval * disclaimer. 17e8f1cb50SMintz, Yuval * 18e8f1cb50SMintz, Yuval * - Redistributions in binary form must reproduce the above 19e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 20e8f1cb50SMintz, Yuval * disclaimer in the documentation and /or other materials 21e8f1cb50SMintz, Yuval * provided with the distribution. 22e8f1cb50SMintz, Yuval * 23e8f1cb50SMintz, Yuval * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f1cb50SMintz, Yuval * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f1cb50SMintz, Yuval * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f1cb50SMintz, Yuval * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f1cb50SMintz, Yuval * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f1cb50SMintz, Yuval * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f1cb50SMintz, Yuval * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f1cb50SMintz, Yuval * SOFTWARE. 31fe56b9e6SYuval Mintz */ 32fe56b9e6SYuval Mintz 33fe56b9e6SYuval Mintz #include <linux/types.h> 34fe56b9e6SYuval Mintz #include <asm/byteorder.h> 35fe56b9e6SYuval Mintz #include <linux/io.h> 36fe56b9e6SYuval Mintz #include <linux/delay.h> 37fe56b9e6SYuval Mintz #include <linux/dma-mapping.h> 38fe56b9e6SYuval Mintz #include <linux/errno.h> 39fe56b9e6SYuval Mintz #include <linux/kernel.h> 40fe56b9e6SYuval Mintz #include <linux/mutex.h> 41fe56b9e6SYuval Mintz #include <linux/pci.h> 42fe56b9e6SYuval Mintz #include <linux/slab.h> 43fe56b9e6SYuval Mintz #include <linux/string.h> 44a91eb52aSYuval Mintz #include <linux/vmalloc.h> 45fe56b9e6SYuval Mintz #include <linux/etherdevice.h> 46fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h> 47fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h> 48fe56b9e6SYuval Mintz #include "qed.h" 49fe56b9e6SYuval Mintz #include "qed_cxt.h" 5039651abdSSudarsana Reddy Kalluru #include "qed_dcbx.h" 51fe56b9e6SYuval Mintz #include "qed_dev_api.h" 521e128c81SArun Easi #include "qed_fcoe.h" 53fe56b9e6SYuval Mintz #include "qed_hsi.h" 54fe56b9e6SYuval Mintz #include "qed_hw.h" 55fe56b9e6SYuval Mintz #include "qed_init_ops.h" 56fe56b9e6SYuval Mintz #include "qed_int.h" 57fc831825SYuval Mintz #include "qed_iscsi.h" 580a7fb11cSYuval Mintz #include "qed_ll2.h" 59fe56b9e6SYuval Mintz #include "qed_mcp.h" 601d6cff4fSYuval Mintz #include "qed_ooo.h" 61fe56b9e6SYuval Mintz #include "qed_reg_addr.h" 62fe56b9e6SYuval Mintz #include "qed_sp.h" 6332a47e72SYuval Mintz #include "qed_sriov.h" 640b55e27dSYuval Mintz #include "qed_vf.h" 6551ff1725SRam Amrani #include "qed_roce.h" 66fe56b9e6SYuval Mintz 
670caf5b26SWei Yongjun static DEFINE_SPINLOCK(qm_lock); 6839651abdSSudarsana Reddy Kalluru 6951ff1725SRam Amrani #define QED_MIN_DPIS (4) 7051ff1725SRam Amrani #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) 7151ff1725SRam Amrani 72fe56b9e6SYuval Mintz /* API common to all protocols */ 73c2035eeaSRam Amrani enum BAR_ID { 74c2035eeaSRam Amrani BAR_ID_0, /* used for GRC */ 75c2035eeaSRam Amrani BAR_ID_1 /* Used for doorbells */ 76c2035eeaSRam Amrani }; 77c2035eeaSRam Amrani 7815582962SRahul Verma static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, 7915582962SRahul Verma struct qed_ptt *p_ptt, enum BAR_ID bar_id) 80c2035eeaSRam Amrani { 81c2035eeaSRam Amrani u32 bar_reg = (bar_id == BAR_ID_0 ? 82c2035eeaSRam Amrani PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 831408cc1fSYuval Mintz u32 val; 84c2035eeaSRam Amrani 851408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 861408cc1fSYuval Mintz return 1 << 17; 871408cc1fSYuval Mintz 8815582962SRahul Verma val = qed_rd(p_hwfn, p_ptt, bar_reg); 89c2035eeaSRam Amrani if (val) 90c2035eeaSRam Amrani return 1 << (val + 15); 91c2035eeaSRam Amrani 92c2035eeaSRam Amrani /* Old MFW initialized above registered only conditionally */ 93c2035eeaSRam Amrani if (p_hwfn->cdev->num_hwfns > 1) { 94c2035eeaSRam Amrani DP_INFO(p_hwfn, 95c2035eeaSRam Amrani "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 96c2035eeaSRam Amrani return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 97c2035eeaSRam Amrani } else { 98c2035eeaSRam Amrani DP_INFO(p_hwfn, 99c2035eeaSRam Amrani "BAR size not configured. 
Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 100c2035eeaSRam Amrani return 512 * 1024; 101c2035eeaSRam Amrani } 102c2035eeaSRam Amrani } 103c2035eeaSRam Amrani 1041a635e48SYuval Mintz void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) 105fe56b9e6SYuval Mintz { 106fe56b9e6SYuval Mintz u32 i; 107fe56b9e6SYuval Mintz 108fe56b9e6SYuval Mintz cdev->dp_level = dp_level; 109fe56b9e6SYuval Mintz cdev->dp_module = dp_module; 110fe56b9e6SYuval Mintz for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 111fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 112fe56b9e6SYuval Mintz 113fe56b9e6SYuval Mintz p_hwfn->dp_level = dp_level; 114fe56b9e6SYuval Mintz p_hwfn->dp_module = dp_module; 115fe56b9e6SYuval Mintz } 116fe56b9e6SYuval Mintz } 117fe56b9e6SYuval Mintz 118fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev) 119fe56b9e6SYuval Mintz { 120fe56b9e6SYuval Mintz u8 i; 121fe56b9e6SYuval Mintz 122fe56b9e6SYuval Mintz for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 123fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 124fe56b9e6SYuval Mintz 125fe56b9e6SYuval Mintz p_hwfn->cdev = cdev; 126fe56b9e6SYuval Mintz p_hwfn->my_id = i; 127fe56b9e6SYuval Mintz p_hwfn->b_active = false; 128fe56b9e6SYuval Mintz 129fe56b9e6SYuval Mintz mutex_init(&p_hwfn->dmae_info.mutex); 130fe56b9e6SYuval Mintz } 131fe56b9e6SYuval Mintz 132fe56b9e6SYuval Mintz /* hwfn 0 is always active */ 133fe56b9e6SYuval Mintz cdev->hwfns[0].b_active = true; 134fe56b9e6SYuval Mintz 135fe56b9e6SYuval Mintz /* set the default cache alignment to 128 */ 136fe56b9e6SYuval Mintz cdev->cache_shift = 7; 137fe56b9e6SYuval Mintz } 138fe56b9e6SYuval Mintz 139fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn) 140fe56b9e6SYuval Mintz { 141fe56b9e6SYuval Mintz struct qed_qm_info *qm_info = &p_hwfn->qm_info; 142fe56b9e6SYuval Mintz 143fe56b9e6SYuval Mintz kfree(qm_info->qm_pq_params); 144fe56b9e6SYuval Mintz qm_info->qm_pq_params = NULL; 145fe56b9e6SYuval 
Mintz kfree(qm_info->qm_vport_params); 146fe56b9e6SYuval Mintz qm_info->qm_vport_params = NULL; 147fe56b9e6SYuval Mintz kfree(qm_info->qm_port_params); 148fe56b9e6SYuval Mintz qm_info->qm_port_params = NULL; 149bcd197c8SManish Chopra kfree(qm_info->wfq_data); 150bcd197c8SManish Chopra qm_info->wfq_data = NULL; 151fe56b9e6SYuval Mintz } 152fe56b9e6SYuval Mintz 153fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev) 154fe56b9e6SYuval Mintz { 155fe56b9e6SYuval Mintz int i; 156fe56b9e6SYuval Mintz 1571408cc1fSYuval Mintz if (IS_VF(cdev)) 1581408cc1fSYuval Mintz return; 1591408cc1fSYuval Mintz 160fe56b9e6SYuval Mintz kfree(cdev->fw_data); 161fe56b9e6SYuval Mintz cdev->fw_data = NULL; 162fe56b9e6SYuval Mintz 163fe56b9e6SYuval Mintz kfree(cdev->reset_stats); 164fe56b9e6SYuval Mintz 165fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 166fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 167fe56b9e6SYuval Mintz 168fe56b9e6SYuval Mintz qed_cxt_mngr_free(p_hwfn); 169fe56b9e6SYuval Mintz qed_qm_info_free(p_hwfn); 170fe56b9e6SYuval Mintz qed_spq_free(p_hwfn); 171fe56b9e6SYuval Mintz qed_eq_free(p_hwfn, p_hwfn->p_eq); 172fe56b9e6SYuval Mintz qed_consq_free(p_hwfn, p_hwfn->p_consq); 173fe56b9e6SYuval Mintz qed_int_free(p_hwfn); 1740a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 1750a7fb11cSYuval Mintz qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); 1760a7fb11cSYuval Mintz #endif 1771e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 1781e128c81SArun Easi qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info); 1791e128c81SArun Easi 1801d6cff4fSYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 181fc831825SYuval Mintz qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); 1821d6cff4fSYuval Mintz qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); 1831d6cff4fSYuval Mintz } 18432a47e72SYuval Mintz qed_iov_free(p_hwfn); 185fe56b9e6SYuval Mintz qed_dmae_info_free(p_hwfn); 186270837b3Ssudarsana.kalluru@cavium.com qed_dcbx_info_free(p_hwfn); 187fe56b9e6SYuval Mintz } 
188fe56b9e6SYuval Mintz } 189fe56b9e6SYuval Mintz 190b5a9ee7cSAriel Elior /******************** QM initialization *******************/ 191b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP 0x9f 192b5a9ee7cSAriel Elior #define ACTIVE_TCS_BMAP_4PORT_K2 0xf 193b5a9ee7cSAriel Elior 194b5a9ee7cSAriel Elior /* determines the physical queue flags for a given PF. */ 195b5a9ee7cSAriel Elior static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) 196fe56b9e6SYuval Mintz { 197b5a9ee7cSAriel Elior u32 flags; 198fe56b9e6SYuval Mintz 199b5a9ee7cSAriel Elior /* common flags */ 200b5a9ee7cSAriel Elior flags = PQ_FLAGS_LB; 201fe56b9e6SYuval Mintz 202b5a9ee7cSAriel Elior /* feature flags */ 203b5a9ee7cSAriel Elior if (IS_QED_SRIOV(p_hwfn->cdev)) 204b5a9ee7cSAriel Elior flags |= PQ_FLAGS_VFS; 205fe56b9e6SYuval Mintz 206b5a9ee7cSAriel Elior /* protocol flags */ 207b5a9ee7cSAriel Elior switch (p_hwfn->hw_info.personality) { 208b5a9ee7cSAriel Elior case QED_PCI_ETH: 209b5a9ee7cSAriel Elior flags |= PQ_FLAGS_MCOS; 210b5a9ee7cSAriel Elior break; 211b5a9ee7cSAriel Elior case QED_PCI_FCOE: 212b5a9ee7cSAriel Elior flags |= PQ_FLAGS_OFLD; 213b5a9ee7cSAriel Elior break; 214b5a9ee7cSAriel Elior case QED_PCI_ISCSI: 215b5a9ee7cSAriel Elior flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 216b5a9ee7cSAriel Elior break; 217b5a9ee7cSAriel Elior case QED_PCI_ETH_ROCE: 218b5a9ee7cSAriel Elior flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 219b5a9ee7cSAriel Elior break; 220b5a9ee7cSAriel Elior default: 221fe56b9e6SYuval Mintz DP_ERR(p_hwfn, 222b5a9ee7cSAriel Elior "unknown personality %d\n", p_hwfn->hw_info.personality); 223b5a9ee7cSAriel Elior return 0; 224fe56b9e6SYuval Mintz } 225fe56b9e6SYuval Mintz 226b5a9ee7cSAriel Elior return flags; 227b5a9ee7cSAriel Elior } 228b5a9ee7cSAriel Elior 229b5a9ee7cSAriel Elior /* Getters for resource amounts necessary for qm initialization */ 230b5a9ee7cSAriel Elior u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) 231b5a9ee7cSAriel Elior { 232b5a9ee7cSAriel 
Elior return p_hwfn->hw_info.num_hw_tc; 233b5a9ee7cSAriel Elior } 234b5a9ee7cSAriel Elior 235b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) 236b5a9ee7cSAriel Elior { 237b5a9ee7cSAriel Elior return IS_QED_SRIOV(p_hwfn->cdev) ? 238b5a9ee7cSAriel Elior p_hwfn->cdev->p_iov_info->total_vfs : 0; 239b5a9ee7cSAriel Elior } 240b5a9ee7cSAriel Elior 241b5a9ee7cSAriel Elior #define NUM_DEFAULT_RLS 1 242b5a9ee7cSAriel Elior 243b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) 244b5a9ee7cSAriel Elior { 245b5a9ee7cSAriel Elior u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 246b5a9ee7cSAriel Elior 247b5a9ee7cSAriel Elior /* num RLs can't exceed resource amount of rls or vports */ 248b5a9ee7cSAriel Elior num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), 249b5a9ee7cSAriel Elior RESC_NUM(p_hwfn, QED_VPORT)); 250b5a9ee7cSAriel Elior 251b5a9ee7cSAriel Elior /* Make sure after we reserve there's something left */ 252b5a9ee7cSAriel Elior if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) 253b5a9ee7cSAriel Elior return 0; 254b5a9ee7cSAriel Elior 255b5a9ee7cSAriel Elior /* subtract rls necessary for VFs and one default one for the PF */ 256b5a9ee7cSAriel Elior num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 257b5a9ee7cSAriel Elior 258b5a9ee7cSAriel Elior return num_pf_rls; 259b5a9ee7cSAriel Elior } 260b5a9ee7cSAriel Elior 261b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) 262b5a9ee7cSAriel Elior { 263b5a9ee7cSAriel Elior u32 pq_flags = qed_get_pq_flags(p_hwfn); 264b5a9ee7cSAriel Elior 265b5a9ee7cSAriel Elior /* all pqs share the same vport, except for vfs and pf_rl pqs */ 266b5a9ee7cSAriel Elior return (!!(PQ_FLAGS_RLS & pq_flags)) * 267b5a9ee7cSAriel Elior qed_init_qm_get_num_pf_rls(p_hwfn) + 268b5a9ee7cSAriel Elior (!!(PQ_FLAGS_VFS & pq_flags)) * 269b5a9ee7cSAriel Elior qed_init_qm_get_num_vfs(p_hwfn) + 1; 270b5a9ee7cSAriel Elior } 271b5a9ee7cSAriel Elior 272b5a9ee7cSAriel Elior /* calc amount of PQs 
according to the requested flags */ 273b5a9ee7cSAriel Elior u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) 274b5a9ee7cSAriel Elior { 275b5a9ee7cSAriel Elior u32 pq_flags = qed_get_pq_flags(p_hwfn); 276b5a9ee7cSAriel Elior 277b5a9ee7cSAriel Elior return (!!(PQ_FLAGS_RLS & pq_flags)) * 278b5a9ee7cSAriel Elior qed_init_qm_get_num_pf_rls(p_hwfn) + 279b5a9ee7cSAriel Elior (!!(PQ_FLAGS_MCOS & pq_flags)) * 280b5a9ee7cSAriel Elior qed_init_qm_get_num_tcs(p_hwfn) + 281b5a9ee7cSAriel Elior (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + 282b5a9ee7cSAriel Elior (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) + 283b5a9ee7cSAriel Elior (!!(PQ_FLAGS_LLT & pq_flags)) + 284b5a9ee7cSAriel Elior (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); 285b5a9ee7cSAriel Elior } 286b5a9ee7cSAriel Elior 287b5a9ee7cSAriel Elior /* initialize the top level QM params */ 288b5a9ee7cSAriel Elior static void qed_init_qm_params(struct qed_hwfn *p_hwfn) 289b5a9ee7cSAriel Elior { 290b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 291b5a9ee7cSAriel Elior bool four_port; 292b5a9ee7cSAriel Elior 293b5a9ee7cSAriel Elior /* pq and vport bases for this PF */ 294b5a9ee7cSAriel Elior qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); 295b5a9ee7cSAriel Elior qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); 296b5a9ee7cSAriel Elior 297b5a9ee7cSAriel Elior /* rate limiting and weighted fair queueing are always enabled */ 298b5a9ee7cSAriel Elior qm_info->vport_rl_en = 1; 299b5a9ee7cSAriel Elior qm_info->vport_wfq_en = 1; 300b5a9ee7cSAriel Elior 301b5a9ee7cSAriel Elior /* TC config is different for AH 4 port */ 302b5a9ee7cSAriel Elior four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2; 303b5a9ee7cSAriel Elior 304b5a9ee7cSAriel Elior /* in AH 4 port we have fewer TCs per port */ 305b5a9ee7cSAriel Elior qm_info->max_phys_tcs_per_port = four_port ? 
NUM_PHYS_TCS_4PORT_K2 : 306b5a9ee7cSAriel Elior NUM_OF_PHYS_TCS; 307b5a9ee7cSAriel Elior 308b5a9ee7cSAriel Elior /* unless MFW indicated otherwise, ooo_tc == 3 for 309b5a9ee7cSAriel Elior * AH 4-port and 4 otherwise. 310fe56b9e6SYuval Mintz */ 311b5a9ee7cSAriel Elior if (!qm_info->ooo_tc) 312b5a9ee7cSAriel Elior qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : 313b5a9ee7cSAriel Elior DCBX_TCP_OOO_TC; 314dbb799c3SYuval Mintz } 315dbb799c3SYuval Mintz 316b5a9ee7cSAriel Elior /* initialize qm vport params */ 317b5a9ee7cSAriel Elior static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) 318b5a9ee7cSAriel Elior { 319b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 320b5a9ee7cSAriel Elior u8 i; 321fe56b9e6SYuval Mintz 322b5a9ee7cSAriel Elior /* all vports participate in weighted fair queueing */ 323b5a9ee7cSAriel Elior for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) 324b5a9ee7cSAriel Elior qm_info->qm_vport_params[i].vport_wfq = 1; 325fe56b9e6SYuval Mintz } 326fe56b9e6SYuval Mintz 327b5a9ee7cSAriel Elior /* initialize qm port params */ 328b5a9ee7cSAriel Elior static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) 329b5a9ee7cSAriel Elior { 330fe56b9e6SYuval Mintz /* Initialize qm port parameters */ 331b5a9ee7cSAriel Elior u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines; 332b5a9ee7cSAriel Elior 333b5a9ee7cSAriel Elior /* indicate how ooo and high pri traffic is dealt with */ 334b5a9ee7cSAriel Elior active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 
335b5a9ee7cSAriel Elior ACTIVE_TCS_BMAP_4PORT_K2 : 336b5a9ee7cSAriel Elior ACTIVE_TCS_BMAP; 337b5a9ee7cSAriel Elior 338fe56b9e6SYuval Mintz for (i = 0; i < num_ports; i++) { 339b5a9ee7cSAriel Elior struct init_qm_port_params *p_qm_port = 340b5a9ee7cSAriel Elior &p_hwfn->qm_info.qm_port_params[i]; 341b5a9ee7cSAriel Elior 342fe56b9e6SYuval Mintz p_qm_port->active = 1; 343b5a9ee7cSAriel Elior p_qm_port->active_phys_tcs = active_phys_tcs; 344fe56b9e6SYuval Mintz p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 345fe56b9e6SYuval Mintz p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 346fe56b9e6SYuval Mintz } 347b5a9ee7cSAriel Elior } 348fe56b9e6SYuval Mintz 349b5a9ee7cSAriel Elior /* Reset the params which must be reset for qm init. QM init may be called as 350b5a9ee7cSAriel Elior * a result of flows other than driver load (e.g. dcbx renegotiation). Other 351b5a9ee7cSAriel Elior * params may be affected by the init but would simply recalculate to the same 352b5a9ee7cSAriel Elior * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 353b5a9ee7cSAriel Elior * affected as these amounts stay the same. 
354b5a9ee7cSAriel Elior */ 355b5a9ee7cSAriel Elior static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) 356b5a9ee7cSAriel Elior { 357b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 358fe56b9e6SYuval Mintz 359b5a9ee7cSAriel Elior qm_info->num_pqs = 0; 360b5a9ee7cSAriel Elior qm_info->num_vports = 0; 361b5a9ee7cSAriel Elior qm_info->num_pf_rls = 0; 362b5a9ee7cSAriel Elior qm_info->num_vf_pqs = 0; 363b5a9ee7cSAriel Elior qm_info->first_vf_pq = 0; 364b5a9ee7cSAriel Elior qm_info->first_mcos_pq = 0; 365b5a9ee7cSAriel Elior qm_info->first_rl_pq = 0; 366b5a9ee7cSAriel Elior } 367fe56b9e6SYuval Mintz 368b5a9ee7cSAriel Elior static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) 369b5a9ee7cSAriel Elior { 370b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 371b5a9ee7cSAriel Elior 372b5a9ee7cSAriel Elior qm_info->num_vports++; 373b5a9ee7cSAriel Elior 374b5a9ee7cSAriel Elior if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) 375b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 376b5a9ee7cSAriel Elior "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", 377b5a9ee7cSAriel Elior qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); 378b5a9ee7cSAriel Elior } 379b5a9ee7cSAriel Elior 380b5a9ee7cSAriel Elior /* initialize a single pq and manage qm_info resources accounting. 381b5a9ee7cSAriel Elior * The pq_init_flags param determines whether the PQ is rate limited 382b5a9ee7cSAriel Elior * (for VF or PF) and whether a new vport is allocated to the pq or not 383b5a9ee7cSAriel Elior * (i.e. vport will be shared). 
384b5a9ee7cSAriel Elior */ 385b5a9ee7cSAriel Elior 386b5a9ee7cSAriel Elior /* flags for pq init */ 387b5a9ee7cSAriel Elior #define PQ_INIT_SHARE_VPORT (1 << 0) 388b5a9ee7cSAriel Elior #define PQ_INIT_PF_RL (1 << 1) 389b5a9ee7cSAriel Elior #define PQ_INIT_VF_RL (1 << 2) 390b5a9ee7cSAriel Elior 391b5a9ee7cSAriel Elior /* defines for pq init */ 392b5a9ee7cSAriel Elior #define PQ_INIT_DEFAULT_WRR_GROUP 1 393b5a9ee7cSAriel Elior #define PQ_INIT_DEFAULT_TC 0 394b5a9ee7cSAriel Elior #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 395b5a9ee7cSAriel Elior 396b5a9ee7cSAriel Elior static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, 397b5a9ee7cSAriel Elior struct qed_qm_info *qm_info, 398b5a9ee7cSAriel Elior u8 tc, u32 pq_init_flags) 399b5a9ee7cSAriel Elior { 400b5a9ee7cSAriel Elior u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn); 401b5a9ee7cSAriel Elior 402b5a9ee7cSAriel Elior if (pq_idx > max_pq) 403b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 404b5a9ee7cSAriel Elior "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); 405b5a9ee7cSAriel Elior 406b5a9ee7cSAriel Elior /* init pq params */ 407b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + 408b5a9ee7cSAriel Elior qm_info->num_vports; 409b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].tc_id = tc; 410b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 411b5a9ee7cSAriel Elior qm_info->qm_pq_params[pq_idx].rl_valid = 412b5a9ee7cSAriel Elior (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 413b5a9ee7cSAriel Elior 414b5a9ee7cSAriel Elior /* qm params accounting */ 415b5a9ee7cSAriel Elior qm_info->num_pqs++; 416b5a9ee7cSAriel Elior if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 417b5a9ee7cSAriel Elior qm_info->num_vports++; 418b5a9ee7cSAriel Elior 419b5a9ee7cSAriel Elior if (pq_init_flags & PQ_INIT_PF_RL) 420b5a9ee7cSAriel Elior qm_info->num_pf_rls++; 421b5a9ee7cSAriel Elior 422b5a9ee7cSAriel Elior if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) 423b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 424b5a9ee7cSAriel Elior "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", 425b5a9ee7cSAriel Elior qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); 426b5a9ee7cSAriel Elior 427b5a9ee7cSAriel Elior if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn)) 428b5a9ee7cSAriel Elior DP_ERR(p_hwfn, 429b5a9ee7cSAriel Elior "rl overflow! 
qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", 430b5a9ee7cSAriel Elior qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn)); 431b5a9ee7cSAriel Elior } 432b5a9ee7cSAriel Elior 433b5a9ee7cSAriel Elior /* get pq index according to PQ_FLAGS */ 434b5a9ee7cSAriel Elior static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, 435b5a9ee7cSAriel Elior u32 pq_flags) 436b5a9ee7cSAriel Elior { 437b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 438b5a9ee7cSAriel Elior 439b5a9ee7cSAriel Elior /* Can't have multiple flags set here */ 440b5a9ee7cSAriel Elior if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 441b5a9ee7cSAriel Elior goto err; 442b5a9ee7cSAriel Elior 443b5a9ee7cSAriel Elior switch (pq_flags) { 444b5a9ee7cSAriel Elior case PQ_FLAGS_RLS: 445b5a9ee7cSAriel Elior return &qm_info->first_rl_pq; 446b5a9ee7cSAriel Elior case PQ_FLAGS_MCOS: 447b5a9ee7cSAriel Elior return &qm_info->first_mcos_pq; 448b5a9ee7cSAriel Elior case PQ_FLAGS_LB: 449b5a9ee7cSAriel Elior return &qm_info->pure_lb_pq; 450b5a9ee7cSAriel Elior case PQ_FLAGS_OOO: 451b5a9ee7cSAriel Elior return &qm_info->ooo_pq; 452b5a9ee7cSAriel Elior case PQ_FLAGS_ACK: 453b5a9ee7cSAriel Elior return &qm_info->pure_ack_pq; 454b5a9ee7cSAriel Elior case PQ_FLAGS_OFLD: 455b5a9ee7cSAriel Elior return &qm_info->offload_pq; 456b5a9ee7cSAriel Elior case PQ_FLAGS_LLT: 457b5a9ee7cSAriel Elior return &qm_info->low_latency_pq; 458b5a9ee7cSAriel Elior case PQ_FLAGS_VFS: 459b5a9ee7cSAriel Elior return &qm_info->first_vf_pq; 460b5a9ee7cSAriel Elior default: 461b5a9ee7cSAriel Elior goto err; 462b5a9ee7cSAriel Elior } 463b5a9ee7cSAriel Elior 464b5a9ee7cSAriel Elior err: 465b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 466b5a9ee7cSAriel Elior return NULL; 467b5a9ee7cSAriel Elior } 468b5a9ee7cSAriel Elior 469b5a9ee7cSAriel Elior /* save pq index in qm info */ 470b5a9ee7cSAriel Elior static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn, 471b5a9ee7cSAriel 
Elior u32 pq_flags, u16 pq_val) 472b5a9ee7cSAriel Elior { 473b5a9ee7cSAriel Elior u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 474b5a9ee7cSAriel Elior 475b5a9ee7cSAriel Elior *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 476b5a9ee7cSAriel Elior } 477b5a9ee7cSAriel Elior 478b5a9ee7cSAriel Elior /* get tx pq index, with the PQ TX base already set (ready for context init) */ 479b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags) 480b5a9ee7cSAriel Elior { 481b5a9ee7cSAriel Elior u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 482b5a9ee7cSAriel Elior 483b5a9ee7cSAriel Elior return *base_pq_idx + CM_TX_PQ_BASE; 484b5a9ee7cSAriel Elior } 485b5a9ee7cSAriel Elior 486b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) 487b5a9ee7cSAriel Elior { 488b5a9ee7cSAriel Elior u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); 489b5a9ee7cSAriel Elior 490b5a9ee7cSAriel Elior if (tc > max_tc) 491b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 492b5a9ee7cSAriel Elior 493b5a9ee7cSAriel Elior return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 494b5a9ee7cSAriel Elior } 495b5a9ee7cSAriel Elior 496b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) 497b5a9ee7cSAriel Elior { 498b5a9ee7cSAriel Elior u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); 499b5a9ee7cSAriel Elior 500b5a9ee7cSAriel Elior if (vf > max_vf) 501b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 502b5a9ee7cSAriel Elior 503b5a9ee7cSAriel Elior return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 504b5a9ee7cSAriel Elior } 505b5a9ee7cSAriel Elior 506b5a9ee7cSAriel Elior u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl) 507b5a9ee7cSAriel Elior { 508b5a9ee7cSAriel Elior u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn); 509b5a9ee7cSAriel Elior 510b5a9ee7cSAriel Elior if (rl > max_rl) 511b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "rl %d 
must be smaller than %d\n", rl, max_rl); 512b5a9ee7cSAriel Elior 513b5a9ee7cSAriel Elior return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 514b5a9ee7cSAriel Elior } 515b5a9ee7cSAriel Elior 516b5a9ee7cSAriel Elior /* Functions for creating specific types of pqs */ 517b5a9ee7cSAriel Elior static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) 518b5a9ee7cSAriel Elior { 519b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 520b5a9ee7cSAriel Elior 521b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 522b5a9ee7cSAriel Elior return; 523b5a9ee7cSAriel Elior 524b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 525b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 526b5a9ee7cSAriel Elior } 527b5a9ee7cSAriel Elior 528b5a9ee7cSAriel Elior static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn) 529b5a9ee7cSAriel Elior { 530b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 531b5a9ee7cSAriel Elior 532b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 533b5a9ee7cSAriel Elior return; 534b5a9ee7cSAriel Elior 535b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 536b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 537b5a9ee7cSAriel Elior } 538b5a9ee7cSAriel Elior 539b5a9ee7cSAriel Elior static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) 540b5a9ee7cSAriel Elior { 541b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 542b5a9ee7cSAriel Elior 543b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 544b5a9ee7cSAriel Elior return; 545b5a9ee7cSAriel Elior 546b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 547b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 548b5a9ee7cSAriel Elior } 549b5a9ee7cSAriel Elior 550b5a9ee7cSAriel Elior static void 
qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) 551b5a9ee7cSAriel Elior { 552b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 553b5a9ee7cSAriel Elior 554b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 555b5a9ee7cSAriel Elior return; 556b5a9ee7cSAriel Elior 557b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 558b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 559b5a9ee7cSAriel Elior } 560b5a9ee7cSAriel Elior 561b5a9ee7cSAriel Elior static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) 562b5a9ee7cSAriel Elior { 563b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 564b5a9ee7cSAriel Elior 565b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 566b5a9ee7cSAriel Elior return; 567b5a9ee7cSAriel Elior 568b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 569b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 570b5a9ee7cSAriel Elior } 571b5a9ee7cSAriel Elior 572b5a9ee7cSAriel Elior static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) 573b5a9ee7cSAriel Elior { 574b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 575b5a9ee7cSAriel Elior u8 tc_idx; 576b5a9ee7cSAriel Elior 577b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 578b5a9ee7cSAriel Elior return; 579b5a9ee7cSAriel Elior 580b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 581b5a9ee7cSAriel Elior for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++) 582b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 583b5a9ee7cSAriel Elior } 584b5a9ee7cSAriel Elior 585b5a9ee7cSAriel Elior static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn) 586b5a9ee7cSAriel Elior { 587b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 588b5a9ee7cSAriel Elior u16 vf_idx, 
num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 589b5a9ee7cSAriel Elior 590b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 591b5a9ee7cSAriel Elior return; 592b5a9ee7cSAriel Elior 593b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 5941408cc1fSYuval Mintz qm_info->num_vf_pqs = num_vfs; 595b5a9ee7cSAriel Elior for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 596b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, 597b5a9ee7cSAriel Elior qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 598b5a9ee7cSAriel Elior } 599fe56b9e6SYuval Mintz 600b5a9ee7cSAriel Elior static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn) 601b5a9ee7cSAriel Elior { 602b5a9ee7cSAriel Elior u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn); 603b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 604a64b02d5SManish Chopra 605b5a9ee7cSAriel Elior if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 606b5a9ee7cSAriel Elior return; 607b5a9ee7cSAriel Elior 608b5a9ee7cSAriel Elior qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 609b5a9ee7cSAriel Elior for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 610b5a9ee7cSAriel Elior qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 611b5a9ee7cSAriel Elior } 612b5a9ee7cSAriel Elior 613b5a9ee7cSAriel Elior static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) 614b5a9ee7cSAriel Elior { 615b5a9ee7cSAriel Elior /* rate limited pqs, must come first (FW assumption) */ 616b5a9ee7cSAriel Elior qed_init_qm_rl_pqs(p_hwfn); 617b5a9ee7cSAriel Elior 618b5a9ee7cSAriel Elior /* pqs for multi cos */ 619b5a9ee7cSAriel Elior qed_init_qm_mcos_pqs(p_hwfn); 620b5a9ee7cSAriel Elior 621b5a9ee7cSAriel Elior /* pure loopback pq */ 622b5a9ee7cSAriel Elior qed_init_qm_lb_pq(p_hwfn); 623b5a9ee7cSAriel Elior 624b5a9ee7cSAriel Elior /* out of order pq */ 625b5a9ee7cSAriel Elior qed_init_qm_ooo_pq(p_hwfn); 626b5a9ee7cSAriel Elior 627b5a9ee7cSAriel Elior /* pure ack pq */ 628b5a9ee7cSAriel Elior 
qed_init_qm_pure_ack_pq(p_hwfn); 629b5a9ee7cSAriel Elior 630b5a9ee7cSAriel Elior /* pq for offloaded protocol */ 631b5a9ee7cSAriel Elior qed_init_qm_offload_pq(p_hwfn); 632b5a9ee7cSAriel Elior 633b5a9ee7cSAriel Elior /* low latency pq */ 634b5a9ee7cSAriel Elior qed_init_qm_low_latency_pq(p_hwfn); 635b5a9ee7cSAriel Elior 636b5a9ee7cSAriel Elior /* done sharing vports */ 637b5a9ee7cSAriel Elior qed_init_qm_advance_vport(p_hwfn); 638b5a9ee7cSAriel Elior 639b5a9ee7cSAriel Elior /* pqs for vfs */ 640b5a9ee7cSAriel Elior qed_init_qm_vf_pqs(p_hwfn); 641b5a9ee7cSAriel Elior } 642b5a9ee7cSAriel Elior 643b5a9ee7cSAriel Elior /* compare values of getters against resources amounts */ 644b5a9ee7cSAriel Elior static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) 645b5a9ee7cSAriel Elior { 646b5a9ee7cSAriel Elior if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) { 647b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 648b5a9ee7cSAriel Elior return -EINVAL; 649b5a9ee7cSAriel Elior } 650b5a9ee7cSAriel Elior 651b5a9ee7cSAriel Elior if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) { 652b5a9ee7cSAriel Elior DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 653b5a9ee7cSAriel Elior return -EINVAL; 654b5a9ee7cSAriel Elior } 655fe56b9e6SYuval Mintz 656fe56b9e6SYuval Mintz return 0; 657b5a9ee7cSAriel Elior } 658fe56b9e6SYuval Mintz 659b5a9ee7cSAriel Elior static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) 660b5a9ee7cSAriel Elior { 661b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 662b5a9ee7cSAriel Elior struct init_qm_vport_params *vport; 663b5a9ee7cSAriel Elior struct init_qm_port_params *port; 664b5a9ee7cSAriel Elior struct init_qm_pq_params *pq; 665b5a9ee7cSAriel Elior int i, tc; 666b5a9ee7cSAriel Elior 667b5a9ee7cSAriel Elior /* top level params */ 668b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 669b5a9ee7cSAriel Elior NETIF_MSG_HW, 670b5a9ee7cSAriel Elior "qm init top level 
params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 671b5a9ee7cSAriel Elior qm_info->start_pq, 672b5a9ee7cSAriel Elior qm_info->start_vport, 673b5a9ee7cSAriel Elior qm_info->pure_lb_pq, 674b5a9ee7cSAriel Elior qm_info->offload_pq, qm_info->pure_ack_pq); 675b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 676b5a9ee7cSAriel Elior NETIF_MSG_HW, 677b5a9ee7cSAriel Elior "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 678b5a9ee7cSAriel Elior qm_info->ooo_pq, 679b5a9ee7cSAriel Elior qm_info->first_vf_pq, 680b5a9ee7cSAriel Elior qm_info->num_pqs, 681b5a9ee7cSAriel Elior qm_info->num_vf_pqs, 682b5a9ee7cSAriel Elior qm_info->num_vports, qm_info->max_phys_tcs_per_port); 683b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 684b5a9ee7cSAriel Elior NETIF_MSG_HW, 685b5a9ee7cSAriel Elior "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 686b5a9ee7cSAriel Elior qm_info->pf_rl_en, 687b5a9ee7cSAriel Elior qm_info->pf_wfq_en, 688b5a9ee7cSAriel Elior qm_info->vport_rl_en, 689b5a9ee7cSAriel Elior qm_info->vport_wfq_en, 690b5a9ee7cSAriel Elior qm_info->pf_wfq, 691b5a9ee7cSAriel Elior qm_info->pf_rl, 692b5a9ee7cSAriel Elior qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); 693b5a9ee7cSAriel Elior 694b5a9ee7cSAriel Elior /* port table */ 695b5a9ee7cSAriel Elior for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) { 696b5a9ee7cSAriel Elior port = &(qm_info->qm_port_params[i]); 697b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 698b5a9ee7cSAriel Elior NETIF_MSG_HW, 699b5a9ee7cSAriel Elior "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 700b5a9ee7cSAriel Elior i, 701b5a9ee7cSAriel Elior port->active, 702b5a9ee7cSAriel Elior port->active_phys_tcs, 703b5a9ee7cSAriel Elior port->num_pbf_cmd_lines, 704b5a9ee7cSAriel Elior port->num_btb_blocks, port->reserved); 705b5a9ee7cSAriel Elior } 706b5a9ee7cSAriel Elior 
707b5a9ee7cSAriel Elior /* vport table */ 708b5a9ee7cSAriel Elior for (i = 0; i < qm_info->num_vports; i++) { 709b5a9ee7cSAriel Elior vport = &(qm_info->qm_vport_params[i]); 710b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 711b5a9ee7cSAriel Elior NETIF_MSG_HW, 712b5a9ee7cSAriel Elior "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 713b5a9ee7cSAriel Elior qm_info->start_vport + i, 714b5a9ee7cSAriel Elior vport->vport_rl, vport->vport_wfq); 715b5a9ee7cSAriel Elior for (tc = 0; tc < NUM_OF_TCS; tc++) 716b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 717b5a9ee7cSAriel Elior NETIF_MSG_HW, 718b5a9ee7cSAriel Elior "%d ", vport->first_tx_pq_id[tc]); 719b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); 720b5a9ee7cSAriel Elior } 721b5a9ee7cSAriel Elior 722b5a9ee7cSAriel Elior /* pq table */ 723b5a9ee7cSAriel Elior for (i = 0; i < qm_info->num_pqs; i++) { 724b5a9ee7cSAriel Elior pq = &(qm_info->qm_pq_params[i]); 725b5a9ee7cSAriel Elior DP_VERBOSE(p_hwfn, 726b5a9ee7cSAriel Elior NETIF_MSG_HW, 727b5a9ee7cSAriel Elior "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 728b5a9ee7cSAriel Elior qm_info->start_pq + i, 729b5a9ee7cSAriel Elior pq->vport_id, 730b5a9ee7cSAriel Elior pq->tc_id, pq->wrr_group, pq->rl_valid); 731b5a9ee7cSAriel Elior } 732b5a9ee7cSAriel Elior } 733b5a9ee7cSAriel Elior 734b5a9ee7cSAriel Elior static void qed_init_qm_info(struct qed_hwfn *p_hwfn) 735b5a9ee7cSAriel Elior { 736b5a9ee7cSAriel Elior /* reset params required for init run */ 737b5a9ee7cSAriel Elior qed_init_qm_reset_params(p_hwfn); 738b5a9ee7cSAriel Elior 739b5a9ee7cSAriel Elior /* init QM top level params */ 740b5a9ee7cSAriel Elior qed_init_qm_params(p_hwfn); 741b5a9ee7cSAriel Elior 742b5a9ee7cSAriel Elior /* init QM port params */ 743b5a9ee7cSAriel Elior qed_init_qm_port_params(p_hwfn); 744b5a9ee7cSAriel Elior 745b5a9ee7cSAriel Elior /* init QM vport params */ 746b5a9ee7cSAriel Elior qed_init_qm_vport_params(p_hwfn); 747b5a9ee7cSAriel Elior 748b5a9ee7cSAriel Elior /* init QM 
physical queue params */ 749b5a9ee7cSAriel Elior qed_init_qm_pq_params(p_hwfn); 750b5a9ee7cSAriel Elior 751b5a9ee7cSAriel Elior /* display all that init */ 752b5a9ee7cSAriel Elior qed_dp_init_qm_params(p_hwfn); 753fe56b9e6SYuval Mintz } 754fe56b9e6SYuval Mintz 75539651abdSSudarsana Reddy Kalluru /* This function reconfigures the QM pf on the fly. 75639651abdSSudarsana Reddy Kalluru * For this purpose we: 75739651abdSSudarsana Reddy Kalluru * 1. reconfigure the QM database 75839651abdSSudarsana Reddy Kalluru * 2. set new values to runtime arrat 75939651abdSSudarsana Reddy Kalluru * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 76039651abdSSudarsana Reddy Kalluru * 4. activate init tool in QM_PF stage 76139651abdSSudarsana Reddy Kalluru * 5. send an sdm_qm_cmd through rbc interface to release the QM 76239651abdSSudarsana Reddy Kalluru */ 76339651abdSSudarsana Reddy Kalluru int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 76439651abdSSudarsana Reddy Kalluru { 76539651abdSSudarsana Reddy Kalluru struct qed_qm_info *qm_info = &p_hwfn->qm_info; 76639651abdSSudarsana Reddy Kalluru bool b_rc; 76739651abdSSudarsana Reddy Kalluru int rc; 76839651abdSSudarsana Reddy Kalluru 76939651abdSSudarsana Reddy Kalluru /* initialize qed's qm data structure */ 770b5a9ee7cSAriel Elior qed_init_qm_info(p_hwfn); 77139651abdSSudarsana Reddy Kalluru 77239651abdSSudarsana Reddy Kalluru /* stop PF's qm queues */ 77339651abdSSudarsana Reddy Kalluru spin_lock_bh(&qm_lock); 77439651abdSSudarsana Reddy Kalluru b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 77539651abdSSudarsana Reddy Kalluru qm_info->start_pq, qm_info->num_pqs); 77639651abdSSudarsana Reddy Kalluru spin_unlock_bh(&qm_lock); 77739651abdSSudarsana Reddy Kalluru if (!b_rc) 77839651abdSSudarsana Reddy Kalluru return -EINVAL; 77939651abdSSudarsana Reddy Kalluru 78039651abdSSudarsana Reddy Kalluru /* clear the QM_PF runtime phase leftovers from previous init */ 78139651abdSSudarsana Reddy 
Kalluru qed_init_clear_rt_data(p_hwfn); 78239651abdSSudarsana Reddy Kalluru 78339651abdSSudarsana Reddy Kalluru /* prepare QM portion of runtime array */ 78415582962SRahul Verma qed_qm_init_pf(p_hwfn, p_ptt); 78539651abdSSudarsana Reddy Kalluru 78639651abdSSudarsana Reddy Kalluru /* activate init tool on runtime array */ 78739651abdSSudarsana Reddy Kalluru rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 78839651abdSSudarsana Reddy Kalluru p_hwfn->hw_info.hw_mode); 78939651abdSSudarsana Reddy Kalluru if (rc) 79039651abdSSudarsana Reddy Kalluru return rc; 79139651abdSSudarsana Reddy Kalluru 79239651abdSSudarsana Reddy Kalluru /* start PF's qm queues */ 79339651abdSSudarsana Reddy Kalluru spin_lock_bh(&qm_lock); 79439651abdSSudarsana Reddy Kalluru b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 79539651abdSSudarsana Reddy Kalluru qm_info->start_pq, qm_info->num_pqs); 79639651abdSSudarsana Reddy Kalluru spin_unlock_bh(&qm_lock); 79739651abdSSudarsana Reddy Kalluru if (!b_rc) 79839651abdSSudarsana Reddy Kalluru return -EINVAL; 79939651abdSSudarsana Reddy Kalluru 80039651abdSSudarsana Reddy Kalluru return 0; 80139651abdSSudarsana Reddy Kalluru } 80239651abdSSudarsana Reddy Kalluru 803b5a9ee7cSAriel Elior static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) 804b5a9ee7cSAriel Elior { 805b5a9ee7cSAriel Elior struct qed_qm_info *qm_info = &p_hwfn->qm_info; 806b5a9ee7cSAriel Elior int rc; 807b5a9ee7cSAriel Elior 808b5a9ee7cSAriel Elior rc = qed_init_qm_sanity(p_hwfn); 809b5a9ee7cSAriel Elior if (rc) 810b5a9ee7cSAriel Elior goto alloc_err; 811b5a9ee7cSAriel Elior 812b5a9ee7cSAriel Elior qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * 813b5a9ee7cSAriel Elior qed_init_qm_get_num_pqs(p_hwfn), 814b5a9ee7cSAriel Elior GFP_KERNEL); 815b5a9ee7cSAriel Elior if (!qm_info->qm_pq_params) 816b5a9ee7cSAriel Elior goto alloc_err; 817b5a9ee7cSAriel Elior 818b5a9ee7cSAriel Elior qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * 
819b5a9ee7cSAriel Elior qed_init_qm_get_num_vports(p_hwfn), 820b5a9ee7cSAriel Elior GFP_KERNEL); 821b5a9ee7cSAriel Elior if (!qm_info->qm_vport_params) 822b5a9ee7cSAriel Elior goto alloc_err; 823b5a9ee7cSAriel Elior 8242f7878c0SWei Yongjun qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * 825b5a9ee7cSAriel Elior p_hwfn->cdev->num_ports_in_engines, 826b5a9ee7cSAriel Elior GFP_KERNEL); 827b5a9ee7cSAriel Elior if (!qm_info->qm_port_params) 828b5a9ee7cSAriel Elior goto alloc_err; 829b5a9ee7cSAriel Elior 830b5a9ee7cSAriel Elior qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) * 831b5a9ee7cSAriel Elior qed_init_qm_get_num_vports(p_hwfn), 832b5a9ee7cSAriel Elior GFP_KERNEL); 833b5a9ee7cSAriel Elior if (!qm_info->wfq_data) 834b5a9ee7cSAriel Elior goto alloc_err; 835b5a9ee7cSAriel Elior 836b5a9ee7cSAriel Elior return 0; 837b5a9ee7cSAriel Elior 838b5a9ee7cSAriel Elior alloc_err: 839b5a9ee7cSAriel Elior DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); 840b5a9ee7cSAriel Elior qed_qm_info_free(p_hwfn); 841b5a9ee7cSAriel Elior return -ENOMEM; 842b5a9ee7cSAriel Elior } 843b5a9ee7cSAriel Elior 844fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev) 845fe56b9e6SYuval Mintz { 846fc831825SYuval Mintz struct qed_iscsi_info *p_iscsi_info; 8471e128c81SArun Easi struct qed_fcoe_info *p_fcoe_info; 8481d6cff4fSYuval Mintz struct qed_ooo_info *p_ooo_info; 8490a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 8500a7fb11cSYuval Mintz struct qed_ll2_info *p_ll2_info; 8510a7fb11cSYuval Mintz #endif 852f9dc4d1fSRam Amrani u32 rdma_tasks, excess_tasks; 853fe56b9e6SYuval Mintz struct qed_consq *p_consq; 854fe56b9e6SYuval Mintz struct qed_eq *p_eq; 855f9dc4d1fSRam Amrani u32 line_count; 856fe56b9e6SYuval Mintz int i, rc = 0; 857fe56b9e6SYuval Mintz 8581408cc1fSYuval Mintz if (IS_VF(cdev)) 8591408cc1fSYuval Mintz return rc; 8601408cc1fSYuval Mintz 861fe56b9e6SYuval Mintz cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); 862fe56b9e6SYuval Mintz if 
(!cdev->fw_data) 863fe56b9e6SYuval Mintz return -ENOMEM; 864fe56b9e6SYuval Mintz 865fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 866fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 867dbb799c3SYuval Mintz u32 n_eqes, num_cons; 868fe56b9e6SYuval Mintz 869fe56b9e6SYuval Mintz /* First allocate the context manager structure */ 870fe56b9e6SYuval Mintz rc = qed_cxt_mngr_alloc(p_hwfn); 871fe56b9e6SYuval Mintz if (rc) 872fe56b9e6SYuval Mintz goto alloc_err; 873fe56b9e6SYuval Mintz 874fe56b9e6SYuval Mintz /* Set the HW cid/tid numbers (in the contest manager) 875fe56b9e6SYuval Mintz * Must be done prior to any further computations. 876fe56b9e6SYuval Mintz */ 877f9dc4d1fSRam Amrani rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 878fe56b9e6SYuval Mintz if (rc) 879fe56b9e6SYuval Mintz goto alloc_err; 880fe56b9e6SYuval Mintz 881b5a9ee7cSAriel Elior rc = qed_alloc_qm_data(p_hwfn); 882fe56b9e6SYuval Mintz if (rc) 883fe56b9e6SYuval Mintz goto alloc_err; 884fe56b9e6SYuval Mintz 885b5a9ee7cSAriel Elior /* init qm info */ 886b5a9ee7cSAriel Elior qed_init_qm_info(p_hwfn); 887b5a9ee7cSAriel Elior 888fe56b9e6SYuval Mintz /* Compute the ILT client partition */ 889f9dc4d1fSRam Amrani rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 890f9dc4d1fSRam Amrani if (rc) { 891f9dc4d1fSRam Amrani DP_NOTICE(p_hwfn, 892f9dc4d1fSRam Amrani "too many ILT lines; re-computing with less lines\n"); 893f9dc4d1fSRam Amrani /* In case there are not enough ILT lines we reduce the 894f9dc4d1fSRam Amrani * number of RDMA tasks and re-compute. 
895f9dc4d1fSRam Amrani */ 896f9dc4d1fSRam Amrani excess_tasks = 897f9dc4d1fSRam Amrani qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count); 898f9dc4d1fSRam Amrani if (!excess_tasks) 899f9dc4d1fSRam Amrani goto alloc_err; 900f9dc4d1fSRam Amrani 901f9dc4d1fSRam Amrani rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 902f9dc4d1fSRam Amrani rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks); 903fe56b9e6SYuval Mintz if (rc) 904fe56b9e6SYuval Mintz goto alloc_err; 905fe56b9e6SYuval Mintz 906f9dc4d1fSRam Amrani rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 907f9dc4d1fSRam Amrani if (rc) { 908f9dc4d1fSRam Amrani DP_ERR(p_hwfn, 909f9dc4d1fSRam Amrani "failed ILT compute. Requested too many lines: %u\n", 910f9dc4d1fSRam Amrani line_count); 911f9dc4d1fSRam Amrani 912f9dc4d1fSRam Amrani goto alloc_err; 913f9dc4d1fSRam Amrani } 914f9dc4d1fSRam Amrani } 915f9dc4d1fSRam Amrani 916fe56b9e6SYuval Mintz /* CID map / ILT shadow table / T2 917fe56b9e6SYuval Mintz * The talbes sizes are determined by the computations above 918fe56b9e6SYuval Mintz */ 919fe56b9e6SYuval Mintz rc = qed_cxt_tables_alloc(p_hwfn); 920fe56b9e6SYuval Mintz if (rc) 921fe56b9e6SYuval Mintz goto alloc_err; 922fe56b9e6SYuval Mintz 923fe56b9e6SYuval Mintz /* SPQ, must follow ILT because initializes SPQ context */ 924fe56b9e6SYuval Mintz rc = qed_spq_alloc(p_hwfn); 925fe56b9e6SYuval Mintz if (rc) 926fe56b9e6SYuval Mintz goto alloc_err; 927fe56b9e6SYuval Mintz 928fe56b9e6SYuval Mintz /* SP status block allocation */ 929fe56b9e6SYuval Mintz p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, 930fe56b9e6SYuval Mintz RESERVED_PTT_DPC); 931fe56b9e6SYuval Mintz 932fe56b9e6SYuval Mintz rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 933fe56b9e6SYuval Mintz if (rc) 934fe56b9e6SYuval Mintz goto alloc_err; 935fe56b9e6SYuval Mintz 93632a47e72SYuval Mintz rc = qed_iov_alloc(p_hwfn); 93732a47e72SYuval Mintz if (rc) 93832a47e72SYuval Mintz goto alloc_err; 93932a47e72SYuval Mintz 940fe56b9e6SYuval Mintz /* EQ */ 941dbb799c3SYuval Mintz 
n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); 942dbb799c3SYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 943dbb799c3SYuval Mintz num_cons = qed_cxt_get_proto_cid_count(p_hwfn, 944dbb799c3SYuval Mintz PROTOCOLID_ROCE, 9458c93beafSYuval Mintz NULL) * 2; 946dbb799c3SYuval Mintz n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 947dbb799c3SYuval Mintz } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 948dbb799c3SYuval Mintz num_cons = 949dbb799c3SYuval Mintz qed_cxt_get_proto_cid_count(p_hwfn, 9508c93beafSYuval Mintz PROTOCOLID_ISCSI, 9518c93beafSYuval Mintz NULL); 952dbb799c3SYuval Mintz n_eqes += 2 * num_cons; 953dbb799c3SYuval Mintz } 954dbb799c3SYuval Mintz 955dbb799c3SYuval Mintz if (n_eqes > 0xFFFF) { 956dbb799c3SYuval Mintz DP_ERR(p_hwfn, 957dbb799c3SYuval Mintz "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", 958dbb799c3SYuval Mintz n_eqes, 0xFFFF); 9591b4985b5SWei Yongjun rc = -EINVAL; 960fe56b9e6SYuval Mintz goto alloc_err; 9619b15acbfSDan Carpenter } 962dbb799c3SYuval Mintz 963dbb799c3SYuval Mintz p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes); 964dbb799c3SYuval Mintz if (!p_eq) 965dbb799c3SYuval Mintz goto alloc_no_mem; 966fe56b9e6SYuval Mintz p_hwfn->p_eq = p_eq; 967fe56b9e6SYuval Mintz 968fe56b9e6SYuval Mintz p_consq = qed_consq_alloc(p_hwfn); 969dbb799c3SYuval Mintz if (!p_consq) 970dbb799c3SYuval Mintz goto alloc_no_mem; 971fe56b9e6SYuval Mintz p_hwfn->p_consq = p_consq; 972fe56b9e6SYuval Mintz 9730a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 9740a7fb11cSYuval Mintz if (p_hwfn->using_ll2) { 9750a7fb11cSYuval Mintz p_ll2_info = qed_ll2_alloc(p_hwfn); 9760a7fb11cSYuval Mintz if (!p_ll2_info) 9770a7fb11cSYuval Mintz goto alloc_no_mem; 9780a7fb11cSYuval Mintz p_hwfn->p_ll2_info = p_ll2_info; 9790a7fb11cSYuval Mintz } 9800a7fb11cSYuval Mintz #endif 9811e128c81SArun Easi 9821e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 9831e128c81SArun Easi p_fcoe_info = qed_fcoe_alloc(p_hwfn); 
9841e128c81SArun Easi if (!p_fcoe_info) 9851e128c81SArun Easi goto alloc_no_mem; 9861e128c81SArun Easi p_hwfn->p_fcoe_info = p_fcoe_info; 9871e128c81SArun Easi } 9881e128c81SArun Easi 989fc831825SYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 990fc831825SYuval Mintz p_iscsi_info = qed_iscsi_alloc(p_hwfn); 991fc831825SYuval Mintz if (!p_iscsi_info) 992fc831825SYuval Mintz goto alloc_no_mem; 993fc831825SYuval Mintz p_hwfn->p_iscsi_info = p_iscsi_info; 9941d6cff4fSYuval Mintz p_ooo_info = qed_ooo_alloc(p_hwfn); 9951d6cff4fSYuval Mintz if (!p_ooo_info) 9961d6cff4fSYuval Mintz goto alloc_no_mem; 9971d6cff4fSYuval Mintz p_hwfn->p_ooo_info = p_ooo_info; 998fc831825SYuval Mintz } 9990a7fb11cSYuval Mintz 1000fe56b9e6SYuval Mintz /* DMA info initialization */ 1001fe56b9e6SYuval Mintz rc = qed_dmae_info_alloc(p_hwfn); 10022591c280SJoe Perches if (rc) 1003fe56b9e6SYuval Mintz goto alloc_err; 100439651abdSSudarsana Reddy Kalluru 100539651abdSSudarsana Reddy Kalluru /* DCBX initialization */ 100639651abdSSudarsana Reddy Kalluru rc = qed_dcbx_info_alloc(p_hwfn); 10072591c280SJoe Perches if (rc) 100839651abdSSudarsana Reddy Kalluru goto alloc_err; 100939651abdSSudarsana Reddy Kalluru } 1010fe56b9e6SYuval Mintz 1011fe56b9e6SYuval Mintz cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); 10122591c280SJoe Perches if (!cdev->reset_stats) 101383aeb933SYuval Mintz goto alloc_no_mem; 1014fe56b9e6SYuval Mintz 1015fe56b9e6SYuval Mintz return 0; 1016fe56b9e6SYuval Mintz 1017dbb799c3SYuval Mintz alloc_no_mem: 1018dbb799c3SYuval Mintz rc = -ENOMEM; 1019fe56b9e6SYuval Mintz alloc_err: 1020fe56b9e6SYuval Mintz qed_resc_free(cdev); 1021fe56b9e6SYuval Mintz return rc; 1022fe56b9e6SYuval Mintz } 1023fe56b9e6SYuval Mintz 1024fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev) 1025fe56b9e6SYuval Mintz { 1026fe56b9e6SYuval Mintz int i; 1027fe56b9e6SYuval Mintz 10281408cc1fSYuval Mintz if (IS_VF(cdev)) 10291408cc1fSYuval Mintz return; 10301408cc1fSYuval 
Mintz 1031fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 1032fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1033fe56b9e6SYuval Mintz 1034fe56b9e6SYuval Mintz qed_cxt_mngr_setup(p_hwfn); 1035fe56b9e6SYuval Mintz qed_spq_setup(p_hwfn); 1036fe56b9e6SYuval Mintz qed_eq_setup(p_hwfn, p_hwfn->p_eq); 1037fe56b9e6SYuval Mintz qed_consq_setup(p_hwfn, p_hwfn->p_consq); 1038fe56b9e6SYuval Mintz 1039fe56b9e6SYuval Mintz /* Read shadow of current MFW mailbox */ 1040fe56b9e6SYuval Mintz qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1041fe56b9e6SYuval Mintz memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 1042fe56b9e6SYuval Mintz p_hwfn->mcp_info->mfw_mb_cur, 1043fe56b9e6SYuval Mintz p_hwfn->mcp_info->mfw_mb_length); 1044fe56b9e6SYuval Mintz 1045fe56b9e6SYuval Mintz qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); 104632a47e72SYuval Mintz 104732a47e72SYuval Mintz qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 10480a7fb11cSYuval Mintz #ifdef CONFIG_QED_LL2 10490a7fb11cSYuval Mintz if (p_hwfn->using_ll2) 10500a7fb11cSYuval Mintz qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); 10510a7fb11cSYuval Mintz #endif 10521e128c81SArun Easi if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 10531e128c81SArun Easi qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info); 10541e128c81SArun Easi 10551d6cff4fSYuval Mintz if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 1056fc831825SYuval Mintz qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); 10571d6cff4fSYuval Mintz qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); 10581d6cff4fSYuval Mintz } 1059fe56b9e6SYuval Mintz } 1060fe56b9e6SYuval Mintz } 1061fe56b9e6SYuval Mintz 1062fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT (100) 1063fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME (10) 1064fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn, 10650b55e27dSYuval Mintz struct qed_ptt *p_ptt, u16 id, bool is_vf) 1066fe56b9e6SYuval Mintz { 1067fe56b9e6SYuval Mintz u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1068fe56b9e6SYuval Mintz int rc = -EBUSY; 
1069fe56b9e6SYuval Mintz 1070fc48b7a6SYuval Mintz addr = GTT_BAR0_MAP_REG_USDM_RAM + 1071fc48b7a6SYuval Mintz USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1072fe56b9e6SYuval Mintz 10730b55e27dSYuval Mintz if (is_vf) 10740b55e27dSYuval Mintz id += 0x10; 10750b55e27dSYuval Mintz 1076fc48b7a6SYuval Mintz command |= X_FINAL_CLEANUP_AGG_INT << 1077fc48b7a6SYuval Mintz SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1078fc48b7a6SYuval Mintz command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1079fc48b7a6SYuval Mintz command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1080fc48b7a6SYuval Mintz command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1081fe56b9e6SYuval Mintz 1082fe56b9e6SYuval Mintz /* Make sure notification is not set before initiating final cleanup */ 1083fe56b9e6SYuval Mintz if (REG_RD(p_hwfn, addr)) { 10841a635e48SYuval Mintz DP_NOTICE(p_hwfn, 1085fe56b9e6SYuval Mintz "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1086fe56b9e6SYuval Mintz REG_WR(p_hwfn, addr, 0); 1087fe56b9e6SYuval Mintz } 1088fe56b9e6SYuval Mintz 1089fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1090fe56b9e6SYuval Mintz "Sending final cleanup for PFVF[%d] [Command %08x\n]", 1091fe56b9e6SYuval Mintz id, command); 1092fe56b9e6SYuval Mintz 1093fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1094fe56b9e6SYuval Mintz 1095fe56b9e6SYuval Mintz /* Poll until completion */ 1096fe56b9e6SYuval Mintz while (!REG_RD(p_hwfn, addr) && count--) 1097fe56b9e6SYuval Mintz msleep(FINAL_CLEANUP_POLL_TIME); 1098fe56b9e6SYuval Mintz 1099fe56b9e6SYuval Mintz if (REG_RD(p_hwfn, addr)) 1100fe56b9e6SYuval Mintz rc = 0; 1101fe56b9e6SYuval Mintz else 1102fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, 1103fe56b9e6SYuval Mintz "Failed to receive FW final cleanup notification\n"); 1104fe56b9e6SYuval Mintz 1105fe56b9e6SYuval Mintz /* Cleanup afterwards */ 1106fe56b9e6SYuval Mintz REG_WR(p_hwfn, addr, 0); 1107fe56b9e6SYuval 
Mintz 1108fe56b9e6SYuval Mintz return rc; 1109fe56b9e6SYuval Mintz } 1110fe56b9e6SYuval Mintz 11119c79ddaaSMintz, Yuval static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) 1112fe56b9e6SYuval Mintz { 1113fe56b9e6SYuval Mintz int hw_mode = 0; 1114fe56b9e6SYuval Mintz 11159c79ddaaSMintz, Yuval if (QED_IS_BB_B0(p_hwfn->cdev)) { 11169c79ddaaSMintz, Yuval hw_mode |= 1 << MODE_BB; 11179c79ddaaSMintz, Yuval } else if (QED_IS_AH(p_hwfn->cdev)) { 11189c79ddaaSMintz, Yuval hw_mode |= 1 << MODE_K2; 11199c79ddaaSMintz, Yuval } else { 11209c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", 11219c79ddaaSMintz, Yuval p_hwfn->cdev->type); 11229c79ddaaSMintz, Yuval return -EINVAL; 11239c79ddaaSMintz, Yuval } 1124fe56b9e6SYuval Mintz 1125fe56b9e6SYuval Mintz switch (p_hwfn->cdev->num_ports_in_engines) { 1126fe56b9e6SYuval Mintz case 1: 1127fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1128fe56b9e6SYuval Mintz break; 1129fe56b9e6SYuval Mintz case 2: 1130fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1131fe56b9e6SYuval Mintz break; 1132fe56b9e6SYuval Mintz case 4: 1133fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1134fe56b9e6SYuval Mintz break; 1135fe56b9e6SYuval Mintz default: 1136fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", 1137fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines); 11389c79ddaaSMintz, Yuval return -EINVAL; 1139fe56b9e6SYuval Mintz } 1140fe56b9e6SYuval Mintz 1141fe56b9e6SYuval Mintz switch (p_hwfn->cdev->mf_mode) { 1142fc48b7a6SYuval Mintz case QED_MF_DEFAULT: 1143fc48b7a6SYuval Mintz case QED_MF_NPAR: 1144fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_MF_SI; 1145fe56b9e6SYuval Mintz break; 1146fc48b7a6SYuval Mintz case QED_MF_OVLAN: 1147fc48b7a6SYuval Mintz hw_mode |= 1 << MODE_MF_SD; 1148fc48b7a6SYuval Mintz break; 1149fe56b9e6SYuval Mintz default: 1150fc48b7a6SYuval Mintz DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); 1151fc48b7a6SYuval Mintz hw_mode |= 1 << 
MODE_MF_SI; 1152fe56b9e6SYuval Mintz } 1153fe56b9e6SYuval Mintz 1154fe56b9e6SYuval Mintz hw_mode |= 1 << MODE_ASIC; 1155fe56b9e6SYuval Mintz 11561af9dcf7SYuval Mintz if (p_hwfn->cdev->num_hwfns > 1) 11571af9dcf7SYuval Mintz hw_mode |= 1 << MODE_100G; 11581af9dcf7SYuval Mintz 1159fe56b9e6SYuval Mintz p_hwfn->hw_info.hw_mode = hw_mode; 11601af9dcf7SYuval Mintz 11611af9dcf7SYuval Mintz DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), 11621af9dcf7SYuval Mintz "Configuring function for hw_mode: 0x%08x\n", 11631af9dcf7SYuval Mintz p_hwfn->hw_info.hw_mode); 11649c79ddaaSMintz, Yuval 11659c79ddaaSMintz, Yuval return 0; 1166fe56b9e6SYuval Mintz } 1167fe56b9e6SYuval Mintz 1168fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */ 1169fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev) 1170fe56b9e6SYuval Mintz { 1171fe56b9e6SYuval Mintz u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1172fe56b9e6SYuval Mintz int i, sb_id; 1173fe56b9e6SYuval Mintz 1174fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 1175fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1176fe56b9e6SYuval Mintz struct qed_igu_info *p_igu_info; 1177fe56b9e6SYuval Mintz struct qed_igu_block *p_block; 1178fe56b9e6SYuval Mintz struct cau_sb_entry sb_entry; 1179fe56b9e6SYuval Mintz 1180fe56b9e6SYuval Mintz p_igu_info = p_hwfn->hw_info.p_igu_info; 1181fe56b9e6SYuval Mintz 1182fe56b9e6SYuval Mintz for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); 1183fe56b9e6SYuval Mintz sb_id++) { 1184fe56b9e6SYuval Mintz p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; 1185fe56b9e6SYuval Mintz if (!p_block->is_pf) 1186fe56b9e6SYuval Mintz continue; 1187fe56b9e6SYuval Mintz 1188fe56b9e6SYuval Mintz qed_init_cau_sb_entry(p_hwfn, &sb_entry, 11891a635e48SYuval Mintz p_block->function_id, 0, 0); 11901a635e48SYuval Mintz STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); 1191fe56b9e6SYuval Mintz } 1192fe56b9e6SYuval Mintz } 1193fe56b9e6SYuval Mintz } 1194fe56b9e6SYuval 
Mintz 119560afed72STomer Tayar static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, 119660afed72STomer Tayar struct qed_ptt *p_ptt) 119760afed72STomer Tayar { 119860afed72STomer Tayar u32 val, wr_mbs, cache_line_size; 119960afed72STomer Tayar 120060afed72STomer Tayar val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 120160afed72STomer Tayar switch (val) { 120260afed72STomer Tayar case 0: 120360afed72STomer Tayar wr_mbs = 128; 120460afed72STomer Tayar break; 120560afed72STomer Tayar case 1: 120660afed72STomer Tayar wr_mbs = 256; 120760afed72STomer Tayar break; 120860afed72STomer Tayar case 2: 120960afed72STomer Tayar wr_mbs = 512; 121060afed72STomer Tayar break; 121160afed72STomer Tayar default: 121260afed72STomer Tayar DP_INFO(p_hwfn, 121360afed72STomer Tayar "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 121460afed72STomer Tayar val); 121560afed72STomer Tayar return; 121660afed72STomer Tayar } 121760afed72STomer Tayar 121860afed72STomer Tayar cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); 121960afed72STomer Tayar switch (cache_line_size) { 122060afed72STomer Tayar case 32: 122160afed72STomer Tayar val = 0; 122260afed72STomer Tayar break; 122360afed72STomer Tayar case 64: 122460afed72STomer Tayar val = 1; 122560afed72STomer Tayar break; 122660afed72STomer Tayar case 128: 122760afed72STomer Tayar val = 2; 122860afed72STomer Tayar break; 122960afed72STomer Tayar case 256: 123060afed72STomer Tayar val = 3; 123160afed72STomer Tayar break; 123260afed72STomer Tayar default: 123360afed72STomer Tayar DP_INFO(p_hwfn, 123460afed72STomer Tayar "Unexpected value of cache line size [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 123560afed72STomer Tayar cache_line_size); 123660afed72STomer Tayar } 123760afed72STomer Tayar 123860afed72STomer Tayar if (L1_CACHE_BYTES > wr_mbs) 123960afed72STomer Tayar DP_INFO(p_hwfn, 124060afed72STomer Tayar "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 124160afed72STomer Tayar L1_CACHE_BYTES, wr_mbs); 124260afed72STomer Tayar 124360afed72STomer Tayar STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 124460afed72STomer Tayar } 124560afed72STomer Tayar 1246fe56b9e6SYuval Mintz static int qed_hw_init_common(struct qed_hwfn *p_hwfn, 12471a635e48SYuval Mintz struct qed_ptt *p_ptt, int hw_mode) 1248fe56b9e6SYuval Mintz { 1249fe56b9e6SYuval Mintz struct qed_qm_info *qm_info = &p_hwfn->qm_info; 1250fe56b9e6SYuval Mintz struct qed_qm_common_rt_init_params params; 1251fe56b9e6SYuval Mintz struct qed_dev *cdev = p_hwfn->cdev; 12529c79ddaaSMintz, Yuval u8 vf_id, max_num_vfs; 1253dbb799c3SYuval Mintz u16 num_pfs, pf_id; 12541408cc1fSYuval Mintz u32 concrete_fid; 1255fe56b9e6SYuval Mintz int rc = 0; 1256fe56b9e6SYuval Mintz 1257fe56b9e6SYuval Mintz qed_init_cau_rt_data(cdev); 1258fe56b9e6SYuval Mintz 1259fe56b9e6SYuval Mintz /* Program GTT windows */ 1260fe56b9e6SYuval Mintz qed_gtt_init(p_hwfn); 1261fe56b9e6SYuval Mintz 1262fe56b9e6SYuval Mintz if (p_hwfn->mcp_info) { 1263fe56b9e6SYuval Mintz if (p_hwfn->mcp_info->func_info.bandwidth_max) 1264fe56b9e6SYuval Mintz qm_info->pf_rl_en = 1; 1265fe56b9e6SYuval Mintz if (p_hwfn->mcp_info->func_info.bandwidth_min) 1266fe56b9e6SYuval Mintz qm_info->pf_wfq_en = 1; 1267fe56b9e6SYuval Mintz } 1268fe56b9e6SYuval Mintz 1269fe56b9e6SYuval Mintz memset(¶ms, 0, sizeof(params)); 1270fe56b9e6SYuval Mintz params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines; 1271fe56b9e6SYuval Mintz params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; 1272fe56b9e6SYuval Mintz params.pf_rl_en = 
qm_info->pf_rl_en; 1273fe56b9e6SYuval Mintz params.pf_wfq_en = qm_info->pf_wfq_en; 1274fe56b9e6SYuval Mintz params.vport_rl_en = qm_info->vport_rl_en; 1275fe56b9e6SYuval Mintz params.vport_wfq_en = qm_info->vport_wfq_en; 1276fe56b9e6SYuval Mintz params.port_params = qm_info->qm_port_params; 1277fe56b9e6SYuval Mintz 1278fe56b9e6SYuval Mintz qed_qm_common_rt_init(p_hwfn, ¶ms); 1279fe56b9e6SYuval Mintz 1280fe56b9e6SYuval Mintz qed_cxt_hw_init_common(p_hwfn); 1281fe56b9e6SYuval Mintz 128260afed72STomer Tayar qed_init_cache_line_size(p_hwfn, p_ptt); 128360afed72STomer Tayar 1284fe56b9e6SYuval Mintz rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 12851a635e48SYuval Mintz if (rc) 1286fe56b9e6SYuval Mintz return rc; 1287fe56b9e6SYuval Mintz 1288fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1289fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1290fe56b9e6SYuval Mintz 1291dbb799c3SYuval Mintz if (QED_IS_BB(p_hwfn->cdev)) { 1292dbb799c3SYuval Mintz num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); 1293dbb799c3SYuval Mintz for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1294dbb799c3SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, pf_id); 1295dbb799c3SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1296dbb799c3SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1297dbb799c3SYuval Mintz } 1298dbb799c3SYuval Mintz /* pretend to original PF */ 1299dbb799c3SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1300dbb799c3SYuval Mintz } 1301fe56b9e6SYuval Mintz 13029c79ddaaSMintz, Yuval max_num_vfs = QED_IS_AH(cdev) ? 
/* Size the per-PF DPI (Doorbell Page Interface) window inside the PWM
 * region of the doorbell BAR.
 *
 * @p_hwfn:		hw-function context
 * @p_ptt:		PTT window for register access
 * @pwm_region_size:	bytes available in the PWM doorbell region
 * @n_cpus:		requested number of WIDs (one per CPU for EDPM)
 *
 * Derives the DPI page size from the WID count, stores the results in
 * p_hwfn->dpi_size / dpi_count, and programs DORQ_REG_PF_DPI_BIT_SHIFT.
 *
 * Return: 0 on success, -EINVAL if the region cannot hold the configured
 * minimum number of DPIs.
 */
static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
	u32 dpi_bit_shift, dpi_count, dpi_page_size;
	u32 min_dpis;
	u32 n_wids;

	/* Calculate DPI size: one WID per requested CPU (but at least
	 * QED_MIN_WIDS), rounded up to a power of two, then page-aligned.
	 */
	n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
	dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
	dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	/* HW expresses the DPI size as a shift relative to 4KB */
	dpi_bit_shift = ilog2(dpi_page_size / 4096);
	dpi_count = pwm_region_size / dpi_page_size;

	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);

	p_hwfn->dpi_size = dpi_page_size;
	p_hwfn->dpi_count = dpi_count;

	/* NOTE(review): the shift is programmed before the dpi_count
	 * validation below; presumably harmless since a failing caller
	 * retries with n_cpus = 1 - confirm against DORQ usage.
	 */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);

	if (dpi_count < min_dpis)
		return -EINVAL;

	return 0;
}

/* User/DCBx-selectable policy for RoCE Enhanced Doorbell PCI Mode (EDPM) */
enum QED_ROCE_EDPM_MODE {
	QED_ROCE_EDPM_MODE_ENABLE = 0,	/* best effort - WID per CPU */
	QED_ROCE_EDPM_MODE_FORCE_ON = 1,	/* EDPM is mandatory */
	QED_ROCE_EDPM_MODE_DISABLE = 2,	/* never use EDPM */
};
/* Partition the PF's doorbell BAR into a "normal" (legacy, per-connection
 * DEMS) region and a PWM region used for DPIs/EDPM, then size the DPIs.
 *
 * @p_hwfn:	hw-function context
 * @p_ptt:	PTT window for register access
 *
 * Applies the roce_edpm_mode policy: try a WID per CPU when EDPM is
 * enabled/forced, and fall back to a single WID when EDPM is disabled
 * (by the user or by DCBx) or when the best-effort attempt fails.
 *
 * Return: 0 on success, -EINVAL if the BAR is too small or not enough
 * DPIs could be carved out.
 */
static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus = 1;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	int rc = 0;
	u8 cond;

	db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
	/* In CMT the two hw-functions split the doorbell BAR evenly */
	if (p_hwfn->cdev->num_hwfns > 1)
		db_bar_size /= 2;

	/* Calculate doorbell regions - the normal region must hold a DEMS
	 * for every non-PWM (CORE + ETH) connection.
	 */
	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						   NULL) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						   NULL);
	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->cdev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
		       db_bar_size, norm_regsize);
		return -EINVAL;
	}

	if (pwm_regsize < QED_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->cdev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize,
		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
		return -EINVAL;
	}

	/* Calculate number of DPIs */
	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
	    ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
		/* Either EDPM is mandatory, or we are attempting to allocate a
		 * WID per CPU.
		 */
		n_cpus = num_present_cpus();
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	/* Fall back to a single WID unless EDPM was forced on */
	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocate a WID per CPU.
		 */
		n_cpus = 1;
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

		if (cond)
			qed_rdma_dpm_bar(p_hwfn, p_ptt);
	}

	p_hwfn->wid_count = (u16) n_cpus;

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
		norm_regsize,
		pwm_regsize,
		p_hwfn->dpi_size,
		p_hwfn->dpi_count,
		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
		"disabled" : "enabled");

	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
		       p_hwfn->dpi_count,
		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
		return -EINVAL;
	}

	p_hwfn->dpi_start_offset = norm_regsize;

	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return 0;
}
Allocated %d but the current minimum is %d.\n", 143251ff1725SRam Amrani p_hwfn->dpi_count, 143351ff1725SRam Amrani p_hwfn->pf_params.rdma_pf_params.min_dpis); 143451ff1725SRam Amrani return -EINVAL; 143551ff1725SRam Amrani } 143651ff1725SRam Amrani 143751ff1725SRam Amrani p_hwfn->dpi_start_offset = norm_regsize; 143851ff1725SRam Amrani 143951ff1725SRam Amrani /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 144051ff1725SRam Amrani pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); 144151ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 144251ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 144351ff1725SRam Amrani 144451ff1725SRam Amrani return 0; 144551ff1725SRam Amrani } 144651ff1725SRam Amrani 1447fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn, 14481a635e48SYuval Mintz struct qed_ptt *p_ptt, int hw_mode) 1449fe56b9e6SYuval Mintz { 145005fafbfbSYuval Mintz return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, 145105fafbfbSYuval Mintz p_hwfn->port_id, hw_mode); 1452fe56b9e6SYuval Mintz } 1453fe56b9e6SYuval Mintz 1454fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, 1455fe56b9e6SYuval Mintz struct qed_ptt *p_ptt, 145619968430SChopra, Manish struct qed_tunnel_info *p_tunn, 1457fe56b9e6SYuval Mintz int hw_mode, 1458fe56b9e6SYuval Mintz bool b_hw_start, 1459fe56b9e6SYuval Mintz enum qed_int_mode int_mode, 1460fe56b9e6SYuval Mintz bool allow_npar_tx_switch) 1461fe56b9e6SYuval Mintz { 1462fe56b9e6SYuval Mintz u8 rel_pf_id = p_hwfn->rel_pf_id; 1463fe56b9e6SYuval Mintz int rc = 0; 1464fe56b9e6SYuval Mintz 1465fe56b9e6SYuval Mintz if (p_hwfn->mcp_info) { 1466fe56b9e6SYuval Mintz struct qed_mcp_function_info *p_info; 1467fe56b9e6SYuval Mintz 1468fe56b9e6SYuval Mintz p_info = &p_hwfn->mcp_info->func_info; 1469fe56b9e6SYuval Mintz if (p_info->bandwidth_min) 1470fe56b9e6SYuval Mintz p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 1471fe56b9e6SYuval 
/* PF-phase hardware initialization.
 *
 * @p_hwfn:	hw-function context
 * @p_ptt:	PTT window for register access
 * @p_tunn:	tunnel configuration to pass to the start ramrod
 * @hw_mode:	bitmap of MODE_* flags for the init tool
 * @b_hw_start:	if true, enable interrupts and send the PF start ramrod
 * @int_mode:	interrupt mode to enable (INTa/MSI/MSI-X)
 * @allow_npar_tx_switch: NPAR tx-switching setting for the start ramrod
 *
 * The ordering here matters: runtime registers are staged (CXT, IGU, NIG,
 * PRS), the previous driver's leftovers are cleaned, the PF and QM_PF init
 * phases run, and only then may the function-start ramrod be sent.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunnel_info *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn, p_ptt);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & BIT(MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & BIT(MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration - parser searches follow the personality */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc) {
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
			return rc;
		}
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			/* FCoE needs tag-1 searching and exempts those tags
			 * from packet-length statistics.
			 */
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
			qed_wr(p_hwfn, p_ptt,
			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
			       0x100);
		}
	}
	return rc;
}
PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 15401e128c81SArun Easi 0x100); 15411e128c81SArun Easi } 1542fe56b9e6SYuval Mintz } 1543fe56b9e6SYuval Mintz return rc; 1544fe56b9e6SYuval Mintz } 1545fe56b9e6SYuval Mintz 1546fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, 1547fe56b9e6SYuval Mintz struct qed_ptt *p_ptt, 1548fe56b9e6SYuval Mintz u8 enable) 1549fe56b9e6SYuval Mintz { 1550fe56b9e6SYuval Mintz u32 delay_idx = 0, val, set_val = enable ? 1 : 0; 1551fe56b9e6SYuval Mintz 1552fe56b9e6SYuval Mintz /* Change PF in PXP */ 1553fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, 1554fe56b9e6SYuval Mintz PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 1555fe56b9e6SYuval Mintz 1556fe56b9e6SYuval Mintz /* wait until value is set - try for 1 second every 50us */ 1557fe56b9e6SYuval Mintz for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 1558fe56b9e6SYuval Mintz val = qed_rd(p_hwfn, p_ptt, 1559fe56b9e6SYuval Mintz PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 1560fe56b9e6SYuval Mintz if (val == set_val) 1561fe56b9e6SYuval Mintz break; 1562fe56b9e6SYuval Mintz 1563fe56b9e6SYuval Mintz usleep_range(50, 60); 1564fe56b9e6SYuval Mintz } 1565fe56b9e6SYuval Mintz 1566fe56b9e6SYuval Mintz if (val != set_val) { 1567fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, 1568fe56b9e6SYuval Mintz "PFID_ENABLE_MASTER wasn't changed after a second\n"); 1569fe56b9e6SYuval Mintz return -EAGAIN; 1570fe56b9e6SYuval Mintz } 1571fe56b9e6SYuval Mintz 1572fe56b9e6SYuval Mintz return 0; 1573fe56b9e6SYuval Mintz } 1574fe56b9e6SYuval Mintz 1575fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, 1576fe56b9e6SYuval Mintz struct qed_ptt *p_main_ptt) 1577fe56b9e6SYuval Mintz { 1578fe56b9e6SYuval Mintz /* Read shadow of current MFW mailbox */ 1579fe56b9e6SYuval Mintz qed_mcp_read_mb(p_hwfn, p_main_ptt); 1580fe56b9e6SYuval Mintz memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 15811a635e48SYuval Mintz p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); 
1582fe56b9e6SYuval Mintz } 1583fe56b9e6SYuval Mintz 15845d24bcf1STomer Tayar static void 15855d24bcf1STomer Tayar qed_fill_load_req_params(struct qed_load_req_params *p_load_req, 15865d24bcf1STomer Tayar struct qed_drv_load_params *p_drv_load) 15875d24bcf1STomer Tayar { 15885d24bcf1STomer Tayar memset(p_load_req, 0, sizeof(*p_load_req)); 15895d24bcf1STomer Tayar 15905d24bcf1STomer Tayar p_load_req->drv_role = p_drv_load->is_crash_kernel ? 15915d24bcf1STomer Tayar QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; 15925d24bcf1STomer Tayar p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 15935d24bcf1STomer Tayar p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 15945d24bcf1STomer Tayar p_load_req->override_force_load = p_drv_load->override_force_load; 15955d24bcf1STomer Tayar } 15965d24bcf1STomer Tayar 1597eaf3c0c6SChopra, Manish static int qed_vf_start(struct qed_hwfn *p_hwfn, 1598eaf3c0c6SChopra, Manish struct qed_hw_init_params *p_params) 1599eaf3c0c6SChopra, Manish { 1600eaf3c0c6SChopra, Manish if (p_params->p_tunn) { 1601eaf3c0c6SChopra, Manish qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 1602eaf3c0c6SChopra, Manish qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 1603eaf3c0c6SChopra, Manish } 1604eaf3c0c6SChopra, Manish 1605eaf3c0c6SChopra, Manish p_hwfn->b_int_enabled = 1; 1606eaf3c0c6SChopra, Manish 1607eaf3c0c6SChopra, Manish return 0; 1608eaf3c0c6SChopra, Manish } 1609eaf3c0c6SChopra, Manish 1610c0c2d0b4SMintz, Yuval int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 1611fe56b9e6SYuval Mintz { 16125d24bcf1STomer Tayar struct qed_load_req_params load_req_params; 16130fefbfbaSSudarsana Kalluru u32 load_code, param, drv_mb_param; 16140fefbfbaSSudarsana Kalluru bool b_default_mtu = true; 16150fefbfbaSSudarsana Kalluru struct qed_hwfn *p_hwfn; 16160fefbfbaSSudarsana Kalluru int rc = 0, mfw_rc, i; 1617fe56b9e6SYuval Mintz 1618c0c2d0b4SMintz, Yuval if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) 
/* Top-level device initialization: negotiate a load with the MFW for each
 * hw-function and run the engine/port/pf init phases it grants.
 *
 * @cdev:	device context
 * @p_params:	caller-supplied init parameters (fw data, tunnels, int mode,
 *		driver load preferences)
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
	struct qed_load_req_params load_req_params;
	u32 load_code, param, drv_mb_param;
	bool b_default_mtu = true;
	struct qed_hwfn *p_hwfn;
	int rc = 0, mfw_rc, i;

	/* MSI cannot distribute interrupts between the two hw-functions of
	 * a CMT device.
	 */
	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
		if (rc)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		/* NOTE: intentionally shadows the outer p_hwfn */
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* If management didn't provide a default, set one of our own */
		if (!p_hwfn->hw_info.mtu) {
			p_hwfn->hw_info.mtu = 1500;
			b_default_mtu = false;
		}

		if (IS_VF(cdev)) {
			qed_vf_start(p_hwfn, p_params);
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
		/* NOTE(review): rc is overwritten by the next call, so a
		 * failure here is effectively ignored - confirm intentional.
		 */

		rc = qed_calc_hw_mode(p_hwfn);
		if (rc)
			return rc;

		/* Ask the MFW which init role (engine/port/function) we get */
		qed_fill_load_req_params(&load_req_params,
					 p_params->p_drv_load_params);
		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_req_params);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
			return rc;
		}

		load_code = load_req_params.load_code;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Load code: 0x%x\n",
			   load_code);

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		/* Each load code runs its own phase plus every lower one:
		 * ENGINE -> PORT -> FUNCTION (deliberate fallthrough).
		 */
		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall through */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall through */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_params->p_tunn,
					    p_hwfn->hw_info.hw_mode,
					    p_params->b_hw_start,
					    p_params->int_mode,
					    p_params->allow_npar_tx_switch);
			break;
		default:
			DP_NOTICE(p_hwfn,
				  "Unexpected load code [0x%08x]", load_code);
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				   load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		/* Only after the ACK may an init-phase failure be propagated */
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_SET_DCBX,
				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				     &load_code, &param);
		if (mfw_rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	if (IS_PF(cdev)) {
		/* Post-init MFW notifications go through the leading PF.
		 * Failures here are informational only.
		 */
		p_hwfn = QED_LEADING_HWFN(cdev);
		drv_mb_param = STORM_FW_VERSION;
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
				 drv_mb_param, &load_code, &param);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update firmware version\n");

		if (!b_default_mtu) {
			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
						   p_hwfn->hw_info.mtu);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to update default mtu\n");
		}

		rc = qed_mcp_ov_update_driver_state(p_hwfn,
						    p_hwfn->p_main_ptt,
						    QED_OV_DRIVER_STATE_DISABLED);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update driver state\n");

		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_ESWITCH_VEB);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
	}

	return 0;
}
#define QED_HW_STOP_RETRY_LIMIT (10)
/* Disable the PF's TM (timers) block and poll until its linear scans of
 * connections and tasks have drained.
 *
 * @cdev:	device context (unused here; kept for symmetry with callers)
 * @p_hwfn:	hw-function whose timers are stopped
 * @p_ptt:	PTT window for register access
 *
 * Polls up to QED_HW_STOP_RETRY_LIMIT times with a ~1ms sleep between
 * polls; logs a notice if the scans are still active afterwards.
 */
static void qed_hw_timers_stop(struct qed_dev *cdev,
			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	/* Breaking out early means both scans went idle - nothing to report */
	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
break; 17788c925c44SYuval Mintz 17798c925c44SYuval Mintz /* Dependent on number of connection/tasks, possibly 17808c925c44SYuval Mintz * 1ms sleep is required between polls 17818c925c44SYuval Mintz */ 17828c925c44SYuval Mintz usleep_range(1000, 2000); 17838c925c44SYuval Mintz } 17848c925c44SYuval Mintz 17858c925c44SYuval Mintz if (i < QED_HW_STOP_RETRY_LIMIT) 17868c925c44SYuval Mintz return; 17878c925c44SYuval Mintz 17888c925c44SYuval Mintz DP_NOTICE(p_hwfn, 17898c925c44SYuval Mintz "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 17908c925c44SYuval Mintz (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 17918c925c44SYuval Mintz (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 17928c925c44SYuval Mintz } 17938c925c44SYuval Mintz 17948c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev) 17958c925c44SYuval Mintz { 17968c925c44SYuval Mintz int j; 17978c925c44SYuval Mintz 17988c925c44SYuval Mintz for_each_hwfn(cdev, j) { 17998c925c44SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 18008c925c44SYuval Mintz struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; 18018c925c44SYuval Mintz 18028c925c44SYuval Mintz qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 18038c925c44SYuval Mintz } 18048c925c44SYuval Mintz } 18058c925c44SYuval Mintz 1806fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev) 1807fe56b9e6SYuval Mintz { 18081226337aSTomer Tayar struct qed_hwfn *p_hwfn; 18091226337aSTomer Tayar struct qed_ptt *p_ptt; 18101226337aSTomer Tayar int rc, rc2 = 0; 18118c925c44SYuval Mintz int j; 1812fe56b9e6SYuval Mintz 1813fe56b9e6SYuval Mintz for_each_hwfn(cdev, j) { 18141226337aSTomer Tayar p_hwfn = &cdev->hwfns[j]; 18151226337aSTomer Tayar p_ptt = p_hwfn->p_main_ptt; 1816fe56b9e6SYuval Mintz 1817fe56b9e6SYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); 1818fe56b9e6SYuval Mintz 18191408cc1fSYuval Mintz if (IS_VF(cdev)) { 18200b55e27dSYuval Mintz qed_vf_pf_int_cleanup(p_hwfn); 18211226337aSTomer Tayar rc = 
qed_vf_pf_reset(p_hwfn); 18221226337aSTomer Tayar if (rc) { 18231226337aSTomer Tayar DP_NOTICE(p_hwfn, 18241226337aSTomer Tayar "qed_vf_pf_reset failed. rc = %d.\n", 18251226337aSTomer Tayar rc); 18261226337aSTomer Tayar rc2 = -EINVAL; 18271226337aSTomer Tayar } 18281408cc1fSYuval Mintz continue; 18291408cc1fSYuval Mintz } 18301408cc1fSYuval Mintz 1831fe56b9e6SYuval Mintz /* mark the hw as uninitialized... */ 1832fe56b9e6SYuval Mintz p_hwfn->hw_init_done = false; 1833fe56b9e6SYuval Mintz 18341226337aSTomer Tayar /* Send unload command to MCP */ 18351226337aSTomer Tayar rc = qed_mcp_unload_req(p_hwfn, p_ptt); 18361226337aSTomer Tayar if (rc) { 18378c925c44SYuval Mintz DP_NOTICE(p_hwfn, 18381226337aSTomer Tayar "Failed sending a UNLOAD_REQ command. rc = %d.\n", 18391226337aSTomer Tayar rc); 18401226337aSTomer Tayar rc2 = -EINVAL; 18411226337aSTomer Tayar } 18421226337aSTomer Tayar 18431226337aSTomer Tayar qed_slowpath_irq_sync(p_hwfn); 18441226337aSTomer Tayar 18451226337aSTomer Tayar /* After this point no MFW attentions are expected, e.g. prevent 18461226337aSTomer Tayar * race between pf stop and dcbx pf update. 18471226337aSTomer Tayar */ 18481226337aSTomer Tayar rc = qed_sp_pf_stop(p_hwfn); 18491226337aSTomer Tayar if (rc) { 18501226337aSTomer Tayar DP_NOTICE(p_hwfn, 18511226337aSTomer Tayar "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", 18521226337aSTomer Tayar rc); 18531226337aSTomer Tayar rc2 = -EINVAL; 18541226337aSTomer Tayar } 1855fe56b9e6SYuval Mintz 1856fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, 1857fe56b9e6SYuval Mintz NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 1858fe56b9e6SYuval Mintz 1859fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1860fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 1861fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 1862fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1863fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 1864fe56b9e6SYuval Mintz 18658c925c44SYuval Mintz qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 1866fe56b9e6SYuval Mintz 1867fe56b9e6SYuval Mintz /* Disable Attention Generation */ 1868fe56b9e6SYuval Mintz qed_int_igu_disable_int(p_hwfn, p_ptt); 1869fe56b9e6SYuval Mintz 1870fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 1871fe56b9e6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 1872fe56b9e6SYuval Mintz 1873fe56b9e6SYuval Mintz qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 1874fe56b9e6SYuval Mintz 1875fe56b9e6SYuval Mintz /* Need to wait 1ms to guarantee SBs are cleared */ 1876fe56b9e6SYuval Mintz usleep_range(1000, 2000); 18771226337aSTomer Tayar 18781226337aSTomer Tayar /* Disable PF in HW blocks */ 18791226337aSTomer Tayar qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 18801226337aSTomer Tayar qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 18811226337aSTomer Tayar 18821226337aSTomer Tayar qed_mcp_unload_done(p_hwfn, p_ptt); 18831226337aSTomer Tayar if (rc) { 18841226337aSTomer Tayar DP_NOTICE(p_hwfn, 18851226337aSTomer Tayar "Failed sending a UNLOAD_DONE command. 
rc = %d.\n", 18861226337aSTomer Tayar rc); 18871226337aSTomer Tayar rc2 = -EINVAL; 18881226337aSTomer Tayar } 1889fe56b9e6SYuval Mintz } 1890fe56b9e6SYuval Mintz 18911408cc1fSYuval Mintz if (IS_PF(cdev)) { 18921226337aSTomer Tayar p_hwfn = QED_LEADING_HWFN(cdev); 18931226337aSTomer Tayar p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; 18941226337aSTomer Tayar 1895fe56b9e6SYuval Mintz /* Disable DMAE in PXP - in CMT, this should only be done for 1896fe56b9e6SYuval Mintz * first hw-function, and only after all transactions have 1897fe56b9e6SYuval Mintz * stopped for all active hw-functions. 1898fe56b9e6SYuval Mintz */ 18991226337aSTomer Tayar rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); 19001226337aSTomer Tayar if (rc) { 19011226337aSTomer Tayar DP_NOTICE(p_hwfn, 19021226337aSTomer Tayar "qed_change_pci_hwfn failed. rc = %d.\n", rc); 19031226337aSTomer Tayar rc2 = -EINVAL; 19041226337aSTomer Tayar } 19051408cc1fSYuval Mintz } 1906fe56b9e6SYuval Mintz 19071226337aSTomer Tayar return rc2; 1908fe56b9e6SYuval Mintz } 1909fe56b9e6SYuval Mintz 191015582962SRahul Verma int qed_hw_stop_fastpath(struct qed_dev *cdev) 1911cee4d264SManish Chopra { 19128c925c44SYuval Mintz int j; 1913cee4d264SManish Chopra 1914cee4d264SManish Chopra for_each_hwfn(cdev, j) { 1915cee4d264SManish Chopra struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 191615582962SRahul Verma struct qed_ptt *p_ptt; 1917cee4d264SManish Chopra 1918dacd88d6SYuval Mintz if (IS_VF(cdev)) { 1919dacd88d6SYuval Mintz qed_vf_pf_int_cleanup(p_hwfn); 1920dacd88d6SYuval Mintz continue; 1921dacd88d6SYuval Mintz } 192215582962SRahul Verma p_ptt = qed_ptt_acquire(p_hwfn); 192315582962SRahul Verma if (!p_ptt) 192415582962SRahul Verma return -EAGAIN; 1925dacd88d6SYuval Mintz 1926cee4d264SManish Chopra DP_VERBOSE(p_hwfn, 19271a635e48SYuval Mintz NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); 1928cee4d264SManish Chopra 1929cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, 1930cee4d264SManish Chopra NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 
1931cee4d264SManish Chopra 1932cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1933cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 1934cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 1935cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1936cee4d264SManish Chopra qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 1937cee4d264SManish Chopra 1938cee4d264SManish Chopra qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 1939cee4d264SManish Chopra 1940cee4d264SManish Chopra /* Need to wait 1ms to guarantee SBs are cleared */ 1941cee4d264SManish Chopra usleep_range(1000, 2000); 194215582962SRahul Verma qed_ptt_release(p_hwfn, p_ptt); 1943cee4d264SManish Chopra } 1944cee4d264SManish Chopra 194515582962SRahul Verma return 0; 194615582962SRahul Verma } 194715582962SRahul Verma 194815582962SRahul Verma int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) 1949cee4d264SManish Chopra { 195015582962SRahul Verma struct qed_ptt *p_ptt; 195115582962SRahul Verma 1952dacd88d6SYuval Mintz if (IS_VF(p_hwfn->cdev)) 195315582962SRahul Verma return 0; 195415582962SRahul Verma 195515582962SRahul Verma p_ptt = qed_ptt_acquire(p_hwfn); 195615582962SRahul Verma if (!p_ptt) 195715582962SRahul Verma return -EAGAIN; 1958dacd88d6SYuval Mintz 1959cee4d264SManish Chopra /* Re-open incoming traffic */ 196015582962SRahul Verma qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 196115582962SRahul Verma qed_ptt_release(p_hwfn, p_ptt); 196215582962SRahul Verma 196315582962SRahul Verma return 0; 1964cee4d264SManish Chopra } 1965cee4d264SManish Chopra 1966fe56b9e6SYuval Mintz /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 1967fe56b9e6SYuval Mintz static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) 1968fe56b9e6SYuval Mintz { 1969fe56b9e6SYuval Mintz qed_ptt_pool_free(p_hwfn); 1970fe56b9e6SYuval Mintz kfree(p_hwfn->hw_info.p_igu_info); 1971fe56b9e6SYuval Mintz } 
/* Setup bar access - clear the chip-variant-specific indirect-access
 * windows, clear stale PGLUE_B error indications for this PF, and enable
 * internal target reads.
 */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access - register addresses differ between the
	 * AH (K2) and BB chip variants
	 */
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
	} else {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
	}

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
20091a635e48SYuval Mintz p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 20101a635e48SYuval Mintz PXP_PF_ME_OPAQUE_ADDR); 2011fe56b9e6SYuval Mintz 2012fe56b9e6SYuval Mintz p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2013fe56b9e6SYuval Mintz 2014fe56b9e6SYuval Mintz p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2015fe56b9e6SYuval Mintz p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2016fe56b9e6SYuval Mintz PXP_CONCRETE_FID_PFID); 2017fe56b9e6SYuval Mintz p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2018fe56b9e6SYuval Mintz PXP_CONCRETE_FID_PORT); 2019525ef5c0SYuval Mintz 2020525ef5c0SYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, 2021525ef5c0SYuval Mintz "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2022525ef5c0SYuval Mintz p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2023fe56b9e6SYuval Mintz } 2024fe56b9e6SYuval Mintz 202525c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) 202625c089d7SYuval Mintz { 202725c089d7SYuval Mintz u32 *feat_num = p_hwfn->hw_info.feat_num; 20285a1f965aSMintz, Yuval struct qed_sb_cnt_info sb_cnt_info; 2029810bb1f0SMintz, Yuval u32 non_l2_sbs = 0; 203025c089d7SYuval Mintz 20310189efb8SYuval Mintz if (IS_ENABLED(CONFIG_QED_RDMA) && 20320189efb8SYuval Mintz p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 20330189efb8SYuval Mintz /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide 20340189efb8SYuval Mintz * the status blocks equally between L2 / RoCE but with 20350189efb8SYuval Mintz * consideration as to how many l2 queues / cnqs we have. 
203651ff1725SRam Amrani */ 203751ff1725SRam Amrani feat_num[QED_RDMA_CNQ] = 2038810bb1f0SMintz, Yuval min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2, 203951ff1725SRam Amrani RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); 2040810bb1f0SMintz, Yuval 2041810bb1f0SMintz, Yuval non_l2_sbs = feat_num[QED_RDMA_CNQ]; 204251ff1725SRam Amrani } 20430189efb8SYuval Mintz 2044dec26533SMintz, Yuval if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || 2045dec26533SMintz, Yuval p_hwfn->hw_info.personality == QED_PCI_ETH) { 2046dec26533SMintz, Yuval /* Start by allocating VF queues, then PF's */ 2047dec26533SMintz, Yuval memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 2048dec26533SMintz, Yuval qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); 2049dec26533SMintz, Yuval feat_num[QED_VF_L2_QUE] = min_t(u32, 2050dec26533SMintz, Yuval RESC_NUM(p_hwfn, QED_L2_QUEUE), 2051dec26533SMintz, Yuval sb_cnt_info.sb_iov_cnt); 2052810bb1f0SMintz, Yuval feat_num[QED_PF_L2_QUE] = min_t(u32, 2053810bb1f0SMintz, Yuval RESC_NUM(p_hwfn, QED_SB) - 2054810bb1f0SMintz, Yuval non_l2_sbs, 2055dec26533SMintz, Yuval RESC_NUM(p_hwfn, 2056dec26533SMintz, Yuval QED_L2_QUEUE) - 2057dec26533SMintz, Yuval FEAT_NUM(p_hwfn, 2058dec26533SMintz, Yuval QED_VF_L2_QUE)); 2059dec26533SMintz, Yuval } 20605a1f965aSMintz, Yuval 206108737a3fSMintz, Yuval if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) 206208737a3fSMintz, Yuval feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB), 206308737a3fSMintz, Yuval RESC_NUM(p_hwfn, 206408737a3fSMintz, Yuval QED_CMDQS_CQS)); 20655a1f965aSMintz, Yuval DP_VERBOSE(p_hwfn, 20665a1f965aSMintz, Yuval NETIF_MSG_PROBE, 206708737a3fSMintz, Yuval "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n", 20685a1f965aSMintz, Yuval (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), 20695a1f965aSMintz, Yuval (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), 20705a1f965aSMintz, Yuval (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), 207108737a3fSMintz, Yuval (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), 2072810bb1f0SMintz, Yuval RESC_NUM(p_hwfn, 
QED_SB)); 207325c089d7SYuval Mintz } 207425c089d7SYuval Mintz 20759c8517c4STomer Tayar const char *qed_hw_get_resc_name(enum qed_resources res_id) 20762edbff8dSTomer Tayar { 20772edbff8dSTomer Tayar switch (res_id) { 20782edbff8dSTomer Tayar case QED_L2_QUEUE: 20792edbff8dSTomer Tayar return "L2_QUEUE"; 20802edbff8dSTomer Tayar case QED_VPORT: 20812edbff8dSTomer Tayar return "VPORT"; 20822edbff8dSTomer Tayar case QED_RSS_ENG: 20832edbff8dSTomer Tayar return "RSS_ENG"; 20842edbff8dSTomer Tayar case QED_PQ: 20852edbff8dSTomer Tayar return "PQ"; 20862edbff8dSTomer Tayar case QED_RL: 20872edbff8dSTomer Tayar return "RL"; 20882edbff8dSTomer Tayar case QED_MAC: 20892edbff8dSTomer Tayar return "MAC"; 20902edbff8dSTomer Tayar case QED_VLAN: 20912edbff8dSTomer Tayar return "VLAN"; 20922edbff8dSTomer Tayar case QED_RDMA_CNQ_RAM: 20932edbff8dSTomer Tayar return "RDMA_CNQ_RAM"; 20942edbff8dSTomer Tayar case QED_ILT: 20952edbff8dSTomer Tayar return "ILT"; 20962edbff8dSTomer Tayar case QED_LL2_QUEUE: 20972edbff8dSTomer Tayar return "LL2_QUEUE"; 20982edbff8dSTomer Tayar case QED_CMDQS_CQS: 20992edbff8dSTomer Tayar return "CMDQS_CQS"; 21002edbff8dSTomer Tayar case QED_RDMA_STATS_QUEUE: 21012edbff8dSTomer Tayar return "RDMA_STATS_QUEUE"; 21029c8517c4STomer Tayar case QED_BDQ: 21039c8517c4STomer Tayar return "BDQ"; 21049c8517c4STomer Tayar case QED_SB: 21059c8517c4STomer Tayar return "SB"; 21062edbff8dSTomer Tayar default: 21072edbff8dSTomer Tayar return "UNKNOWN_RESOURCE"; 21082edbff8dSTomer Tayar } 21092edbff8dSTomer Tayar } 21102edbff8dSTomer Tayar 21119c8517c4STomer Tayar static int 21129c8517c4STomer Tayar __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, 21139c8517c4STomer Tayar struct qed_ptt *p_ptt, 21149c8517c4STomer Tayar enum qed_resources res_id, 21159c8517c4STomer Tayar u32 resc_max_val, u32 *p_mcp_resp) 21169c8517c4STomer Tayar { 21179c8517c4STomer Tayar int rc; 21189c8517c4STomer Tayar 21199c8517c4STomer Tayar rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 
21209c8517c4STomer Tayar resc_max_val, p_mcp_resp); 21219c8517c4STomer Tayar if (rc) { 21229c8517c4STomer Tayar DP_NOTICE(p_hwfn, 21239c8517c4STomer Tayar "MFW response failure for a max value setting of resource %d [%s]\n", 21249c8517c4STomer Tayar res_id, qed_hw_get_resc_name(res_id)); 21259c8517c4STomer Tayar return rc; 21269c8517c4STomer Tayar } 21279c8517c4STomer Tayar 21289c8517c4STomer Tayar if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 21299c8517c4STomer Tayar DP_INFO(p_hwfn, 21309c8517c4STomer Tayar "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", 21319c8517c4STomer Tayar res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); 21329c8517c4STomer Tayar 21339c8517c4STomer Tayar return 0; 21349c8517c4STomer Tayar } 21359c8517c4STomer Tayar 21369c8517c4STomer Tayar static int 21379c8517c4STomer Tayar qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 21389c8517c4STomer Tayar { 21399c8517c4STomer Tayar bool b_ah = QED_IS_AH(p_hwfn->cdev); 21409c8517c4STomer Tayar u32 resc_max_val, mcp_resp; 21419c8517c4STomer Tayar u8 res_id; 21429c8517c4STomer Tayar int rc; 21439c8517c4STomer Tayar 21449c8517c4STomer Tayar for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 21459c8517c4STomer Tayar switch (res_id) { 21469c8517c4STomer Tayar case QED_LL2_QUEUE: 21479c8517c4STomer Tayar resc_max_val = MAX_NUM_LL2_RX_QUEUES; 21489c8517c4STomer Tayar break; 21499c8517c4STomer Tayar case QED_RDMA_CNQ_RAM: 21509c8517c4STomer Tayar /* No need for a case for QED_CMDQS_CQS since 21519c8517c4STomer Tayar * CNQ/CMDQS are the same resource. 21529c8517c4STomer Tayar */ 21539c8517c4STomer Tayar resc_max_val = NUM_OF_CMDQS_CQS; 21549c8517c4STomer Tayar break; 21559c8517c4STomer Tayar case QED_RDMA_STATS_QUEUE: 21569c8517c4STomer Tayar resc_max_val = b_ah ? 
RDMA_NUM_STATISTIC_COUNTERS_K2 21579c8517c4STomer Tayar : RDMA_NUM_STATISTIC_COUNTERS_BB; 21589c8517c4STomer Tayar break; 21599c8517c4STomer Tayar case QED_BDQ: 21609c8517c4STomer Tayar resc_max_val = BDQ_NUM_RESOURCES; 21619c8517c4STomer Tayar break; 21629c8517c4STomer Tayar default: 21639c8517c4STomer Tayar continue; 21649c8517c4STomer Tayar } 21659c8517c4STomer Tayar 21669c8517c4STomer Tayar rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 21679c8517c4STomer Tayar resc_max_val, &mcp_resp); 21689c8517c4STomer Tayar if (rc) 21699c8517c4STomer Tayar return rc; 21709c8517c4STomer Tayar 21719c8517c4STomer Tayar /* There's no point to continue to the next resource if the 21729c8517c4STomer Tayar * command is not supported by the MFW. 21739c8517c4STomer Tayar * We do continue if the command is supported but the resource 21749c8517c4STomer Tayar * is unknown to the MFW. Such a resource will be later 21759c8517c4STomer Tayar * configured with the default allocation values. 21769c8517c4STomer Tayar */ 21779c8517c4STomer Tayar if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 21789c8517c4STomer Tayar return -EINVAL; 21799c8517c4STomer Tayar } 21809c8517c4STomer Tayar 21819c8517c4STomer Tayar return 0; 21829c8517c4STomer Tayar } 21839c8517c4STomer Tayar 21849c8517c4STomer Tayar static 21859c8517c4STomer Tayar int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, 21869c8517c4STomer Tayar enum qed_resources res_id, 21879c8517c4STomer Tayar u32 *p_resc_num, u32 *p_resc_start) 21889c8517c4STomer Tayar { 21899c8517c4STomer Tayar u8 num_funcs = p_hwfn->num_funcs_on_engine; 21909c8517c4STomer Tayar bool b_ah = QED_IS_AH(p_hwfn->cdev); 21919c8517c4STomer Tayar struct qed_sb_cnt_info sb_cnt_info; 21929c8517c4STomer Tayar 21939c8517c4STomer Tayar switch (res_id) { 21949c8517c4STomer Tayar case QED_L2_QUEUE: 21959c8517c4STomer Tayar *p_resc_num = (b_ah ? 
MAX_NUM_L2_QUEUES_K2 : 21969c8517c4STomer Tayar MAX_NUM_L2_QUEUES_BB) / num_funcs; 21979c8517c4STomer Tayar break; 21989c8517c4STomer Tayar case QED_VPORT: 21999c8517c4STomer Tayar *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 22009c8517c4STomer Tayar MAX_NUM_VPORTS_BB) / num_funcs; 22019c8517c4STomer Tayar break; 22029c8517c4STomer Tayar case QED_RSS_ENG: 22039c8517c4STomer Tayar *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 22049c8517c4STomer Tayar ETH_RSS_ENGINE_NUM_BB) / num_funcs; 22059c8517c4STomer Tayar break; 22069c8517c4STomer Tayar case QED_PQ: 22079c8517c4STomer Tayar *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 22089c8517c4STomer Tayar MAX_QM_TX_QUEUES_BB) / num_funcs; 22099c8517c4STomer Tayar *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 22109c8517c4STomer Tayar break; 22119c8517c4STomer Tayar case QED_RL: 22129c8517c4STomer Tayar *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 22139c8517c4STomer Tayar break; 22149c8517c4STomer Tayar case QED_MAC: 22159c8517c4STomer Tayar case QED_VLAN: 22169c8517c4STomer Tayar /* Each VFC resource can accommodate both a MAC and a VLAN */ 22179c8517c4STomer Tayar *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 22189c8517c4STomer Tayar break; 22199c8517c4STomer Tayar case QED_ILT: 22209c8517c4STomer Tayar *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 22219c8517c4STomer Tayar PXP_NUM_ILT_RECORDS_BB) / num_funcs; 22229c8517c4STomer Tayar break; 22239c8517c4STomer Tayar case QED_LL2_QUEUE: 22249c8517c4STomer Tayar *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 22259c8517c4STomer Tayar break; 22269c8517c4STomer Tayar case QED_RDMA_CNQ_RAM: 22279c8517c4STomer Tayar case QED_CMDQS_CQS: 22289c8517c4STomer Tayar /* CNQ/CMDQS are the same resource */ 22299c8517c4STomer Tayar *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; 22309c8517c4STomer Tayar break; 22319c8517c4STomer Tayar case QED_RDMA_STATS_QUEUE: 22329c8517c4STomer Tayar *p_resc_num = (b_ah ? 
RDMA_NUM_STATISTIC_COUNTERS_K2 : 22339c8517c4STomer Tayar RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs; 22349c8517c4STomer Tayar break; 22359c8517c4STomer Tayar case QED_BDQ: 22369c8517c4STomer Tayar if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && 22379c8517c4STomer Tayar p_hwfn->hw_info.personality != QED_PCI_FCOE) 22389c8517c4STomer Tayar *p_resc_num = 0; 22399c8517c4STomer Tayar else 22409c8517c4STomer Tayar *p_resc_num = 1; 22419c8517c4STomer Tayar break; 22429c8517c4STomer Tayar case QED_SB: 22439c8517c4STomer Tayar memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 22449c8517c4STomer Tayar qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); 22459c8517c4STomer Tayar *p_resc_num = sb_cnt_info.sb_cnt; 22469c8517c4STomer Tayar break; 22479c8517c4STomer Tayar default: 22489c8517c4STomer Tayar return -EINVAL; 22499c8517c4STomer Tayar } 22509c8517c4STomer Tayar 22519c8517c4STomer Tayar switch (res_id) { 22529c8517c4STomer Tayar case QED_BDQ: 22539c8517c4STomer Tayar if (!*p_resc_num) 22549c8517c4STomer Tayar *p_resc_start = 0; 22559c8517c4STomer Tayar else if (p_hwfn->cdev->num_ports_in_engines == 4) 22569c8517c4STomer Tayar *p_resc_start = p_hwfn->port_id; 22579c8517c4STomer Tayar else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) 22589c8517c4STomer Tayar *p_resc_start = p_hwfn->port_id; 22599c8517c4STomer Tayar else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 22609c8517c4STomer Tayar *p_resc_start = p_hwfn->port_id + 2; 22619c8517c4STomer Tayar break; 22629c8517c4STomer Tayar default: 22639c8517c4STomer Tayar *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 22649c8517c4STomer Tayar break; 22659c8517c4STomer Tayar } 22669c8517c4STomer Tayar 22679c8517c4STomer Tayar return 0; 22689c8517c4STomer Tayar } 22699c8517c4STomer Tayar 22709c8517c4STomer Tayar static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, 22712edbff8dSTomer Tayar enum qed_resources res_id) 22722edbff8dSTomer Tayar { 22739c8517c4STomer Tayar u32 dflt_resc_num = 0, dflt_resc_start = 0; 
22749c8517c4STomer Tayar u32 mcp_resp, *p_resc_num, *p_resc_start; 22752edbff8dSTomer Tayar int rc; 22762edbff8dSTomer Tayar 22772edbff8dSTomer Tayar p_resc_num = &RESC_NUM(p_hwfn, res_id); 22782edbff8dSTomer Tayar p_resc_start = &RESC_START(p_hwfn, res_id); 22792edbff8dSTomer Tayar 22809c8517c4STomer Tayar rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 22819c8517c4STomer Tayar &dflt_resc_start); 22829c8517c4STomer Tayar if (rc) { 22832edbff8dSTomer Tayar DP_ERR(p_hwfn, 22842edbff8dSTomer Tayar "Failed to get default amount for resource %d [%s]\n", 22852edbff8dSTomer Tayar res_id, qed_hw_get_resc_name(res_id)); 22869c8517c4STomer Tayar return rc; 22872edbff8dSTomer Tayar } 22882edbff8dSTomer Tayar 22899c8517c4STomer Tayar rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 22909c8517c4STomer Tayar &mcp_resp, p_resc_num, p_resc_start); 22912edbff8dSTomer Tayar if (rc) { 22922edbff8dSTomer Tayar DP_NOTICE(p_hwfn, 22932edbff8dSTomer Tayar "MFW response failure for an allocation request for resource %d [%s]\n", 22942edbff8dSTomer Tayar res_id, qed_hw_get_resc_name(res_id)); 22952edbff8dSTomer Tayar return rc; 22962edbff8dSTomer Tayar } 22972edbff8dSTomer Tayar 22982edbff8dSTomer Tayar /* Default driver values are applied in the following cases: 22992edbff8dSTomer Tayar * - The resource allocation MB command is not supported by the MFW 23002edbff8dSTomer Tayar * - There is an internal error in the MFW while processing the request 23012edbff8dSTomer Tayar * - The resource ID is unknown to the MFW 23022edbff8dSTomer Tayar */ 23039c8517c4STomer Tayar if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 23049c8517c4STomer Tayar DP_INFO(p_hwfn, 23059c8517c4STomer Tayar "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. 
Applying default values [%d,%d].\n", 23062edbff8dSTomer Tayar res_id, 23072edbff8dSTomer Tayar qed_hw_get_resc_name(res_id), 23082edbff8dSTomer Tayar mcp_resp, dflt_resc_num, dflt_resc_start); 23092edbff8dSTomer Tayar *p_resc_num = dflt_resc_num; 23102edbff8dSTomer Tayar *p_resc_start = dflt_resc_start; 23112edbff8dSTomer Tayar goto out; 23122edbff8dSTomer Tayar } 23132edbff8dSTomer Tayar 23142edbff8dSTomer Tayar /* Special handling for status blocks; Would be revised in future */ 23152edbff8dSTomer Tayar if (res_id == QED_SB) { 23169c8517c4STomer Tayar *p_resc_num -= 1; 23179c8517c4STomer Tayar *p_resc_start -= p_hwfn->enabled_func_idx; 23182edbff8dSTomer Tayar } 23192edbff8dSTomer Tayar out: 23202edbff8dSTomer Tayar /* PQs have to divide by 8 [that's the HW granularity]. 23212edbff8dSTomer Tayar * Reduce number so it would fit. 23222edbff8dSTomer Tayar */ 23232edbff8dSTomer Tayar if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { 23242edbff8dSTomer Tayar DP_INFO(p_hwfn, 23252edbff8dSTomer Tayar "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 23262edbff8dSTomer Tayar *p_resc_num, 23272edbff8dSTomer Tayar (*p_resc_num) & ~0x7, 23282edbff8dSTomer Tayar *p_resc_start, (*p_resc_start) & ~0x7); 23292edbff8dSTomer Tayar *p_resc_num &= ~0x7; 23302edbff8dSTomer Tayar *p_resc_start &= ~0x7; 23312edbff8dSTomer Tayar } 23322edbff8dSTomer Tayar 23332edbff8dSTomer Tayar return 0; 23342edbff8dSTomer Tayar } 23352edbff8dSTomer Tayar 23369c8517c4STomer Tayar static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) 2337fe56b9e6SYuval Mintz { 23389c8517c4STomer Tayar int rc; 23399c8517c4STomer Tayar u8 res_id; 23409c8517c4STomer Tayar 23419c8517c4STomer Tayar for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 23429c8517c4STomer Tayar rc = __qed_hw_set_resc_info(p_hwfn, res_id); 23439c8517c4STomer Tayar if (rc) 23449c8517c4STomer Tayar return rc; 23459c8517c4STomer Tayar } 23469c8517c4STomer Tayar 23479c8517c4STomer Tayar return 0; 
23489c8517c4STomer Tayar } 23499c8517c4STomer Tayar 23509c8517c4STomer Tayar static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 23519c8517c4STomer Tayar { 23529c8517c4STomer Tayar struct qed_resc_unlock_params resc_unlock_params; 23539c8517c4STomer Tayar struct qed_resc_lock_params resc_lock_params; 23549c79ddaaSMintz, Yuval bool b_ah = QED_IS_AH(p_hwfn->cdev); 23552edbff8dSTomer Tayar u8 res_id; 23562edbff8dSTomer Tayar int rc; 2357fe56b9e6SYuval Mintz 23589c8517c4STomer Tayar /* Setting the max values of the soft resources and the following 23599c8517c4STomer Tayar * resources allocation queries should be atomic. Since several PFs can 23609c8517c4STomer Tayar * run in parallel - a resource lock is needed. 23619c8517c4STomer Tayar * If either the resource lock or resource set value commands are not 23629c8517c4STomer Tayar * supported - skip the the max values setting, release the lock if 23639c8517c4STomer Tayar * needed, and proceed to the queries. Other failures, including a 23649c8517c4STomer Tayar * failure to acquire the lock, will cause this function to fail. 
23659c8517c4STomer Tayar */ 2366f470f22cSsudarsana.kalluru@cavium.com qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 2367f470f22cSsudarsana.kalluru@cavium.com QED_RESC_LOCK_RESC_ALLOC, false); 23689c8517c4STomer Tayar 23699c8517c4STomer Tayar rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 23709c8517c4STomer Tayar if (rc && rc != -EINVAL) { 23712edbff8dSTomer Tayar return rc; 23729c8517c4STomer Tayar } else if (rc == -EINVAL) { 23739c8517c4STomer Tayar DP_INFO(p_hwfn, 23749c8517c4STomer Tayar "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 23759c8517c4STomer Tayar } else if (!rc && !resc_lock_params.b_granted) { 23769c8517c4STomer Tayar DP_NOTICE(p_hwfn, 23779c8517c4STomer Tayar "Failed to acquire the resource lock for the resource allocation commands\n"); 23789c8517c4STomer Tayar return -EBUSY; 23799c8517c4STomer Tayar } else { 23809c8517c4STomer Tayar rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt); 23819c8517c4STomer Tayar if (rc && rc != -EINVAL) { 23829c8517c4STomer Tayar DP_NOTICE(p_hwfn, 23839c8517c4STomer Tayar "Failed to set the max values of the soft resources\n"); 23849c8517c4STomer Tayar goto unlock_and_exit; 23859c8517c4STomer Tayar } else if (rc == -EINVAL) { 23869c8517c4STomer Tayar DP_INFO(p_hwfn, 23879c8517c4STomer Tayar "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 23889c8517c4STomer Tayar rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, 23899c8517c4STomer Tayar &resc_unlock_params); 23909c8517c4STomer Tayar if (rc) 23919c8517c4STomer Tayar DP_INFO(p_hwfn, 23929c8517c4STomer Tayar "Failed to release the resource lock for the resource allocation commands\n"); 23939c8517c4STomer Tayar } 23949c8517c4STomer Tayar } 23959c8517c4STomer Tayar 23969c8517c4STomer Tayar rc = qed_hw_set_resc_info(p_hwfn); 23979c8517c4STomer Tayar if (rc) 23989c8517c4STomer Tayar goto unlock_and_exit; 23999c8517c4STomer Tayar 24009c8517c4STomer 
Tayar if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 24019c8517c4STomer Tayar rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); 24029c8517c4STomer Tayar if (rc) 24039c8517c4STomer Tayar DP_INFO(p_hwfn, 24049c8517c4STomer Tayar "Failed to release the resource lock for the resource allocation commands\n"); 24052edbff8dSTomer Tayar } 2406dbb799c3SYuval Mintz 2407dbb799c3SYuval Mintz /* Sanity for ILT */ 24089c79ddaaSMintz, Yuval if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 24099c79ddaaSMintz, Yuval (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 2410dbb799c3SYuval Mintz DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", 2411dbb799c3SYuval Mintz RESC_START(p_hwfn, QED_ILT), 2412dbb799c3SYuval Mintz RESC_END(p_hwfn, QED_ILT) - 1); 2413dbb799c3SYuval Mintz return -EINVAL; 2414dbb799c3SYuval Mintz } 2415fe56b9e6SYuval Mintz 241625c089d7SYuval Mintz qed_hw_set_feat(p_hwfn); 241725c089d7SYuval Mintz 24182edbff8dSTomer Tayar for (res_id = 0; res_id < QED_MAX_RESC; res_id++) 24192edbff8dSTomer Tayar DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", 24202edbff8dSTomer Tayar qed_hw_get_resc_name(res_id), 24212edbff8dSTomer Tayar RESC_NUM(p_hwfn, res_id), 24222edbff8dSTomer Tayar RESC_START(p_hwfn, res_id)); 2423dbb799c3SYuval Mintz 2424dbb799c3SYuval Mintz return 0; 24259c8517c4STomer Tayar 24269c8517c4STomer Tayar unlock_and_exit: 24279c8517c4STomer Tayar if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 24289c8517c4STomer Tayar qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); 24299c8517c4STomer Tayar return rc; 2430fe56b9e6SYuval Mintz } 2431fe56b9e6SYuval Mintz 24321a635e48SYuval Mintz static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2433fe56b9e6SYuval Mintz { 2434fc48b7a6SYuval Mintz u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 24351e128c81SArun Easi u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, 
core_cfg; 2436cc875c2eSYuval Mintz struct qed_mcp_link_params *link; 2437fe56b9e6SYuval Mintz 2438fe56b9e6SYuval Mintz /* Read global nvm_cfg address */ 2439fe56b9e6SYuval Mintz nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 2440fe56b9e6SYuval Mintz 2441fe56b9e6SYuval Mintz /* Verify MCP has initialized it */ 2442fe56b9e6SYuval Mintz if (!nvm_cfg_addr) { 2443fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); 2444fe56b9e6SYuval Mintz return -EINVAL; 2445fe56b9e6SYuval Mintz } 2446fe56b9e6SYuval Mintz 2447fe56b9e6SYuval Mintz /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 2448fe56b9e6SYuval Mintz nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 2449fe56b9e6SYuval Mintz 2450cc875c2eSYuval Mintz addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2451cc875c2eSYuval Mintz offsetof(struct nvm_cfg1, glob) + 2452cc875c2eSYuval Mintz offsetof(struct nvm_cfg1_glob, core_cfg); 2453cc875c2eSYuval Mintz 2454cc875c2eSYuval Mintz core_cfg = qed_rd(p_hwfn, p_ptt, addr); 2455cc875c2eSYuval Mintz 2456cc875c2eSYuval Mintz switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 2457cc875c2eSYuval Mintz NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 2458351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 2459cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; 2460cc875c2eSYuval Mintz break; 2461351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 2462cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; 2463cc875c2eSYuval Mintz break; 2464351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 2465cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; 2466cc875c2eSYuval Mintz break; 2467351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 2468cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; 2469cc875c2eSYuval Mintz break; 2470351a4dedSYuval Mintz case 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 2471cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; 2472cc875c2eSYuval Mintz break; 2473351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 2474cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; 2475cc875c2eSYuval Mintz break; 2476351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 2477cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; 2478cc875c2eSYuval Mintz break; 2479351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 2480cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; 2481cc875c2eSYuval Mintz break; 24829c79ddaaSMintz, Yuval case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 24839c79ddaaSMintz, Yuval p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G; 24849c79ddaaSMintz, Yuval break; 2485351a4dedSYuval Mintz case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 2486cc875c2eSYuval Mintz p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; 2487cc875c2eSYuval Mintz break; 24889c79ddaaSMintz, Yuval case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 24899c79ddaaSMintz, Yuval p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G; 24909c79ddaaSMintz, Yuval break; 2491cc875c2eSYuval Mintz default: 24921a635e48SYuval Mintz DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); 2493cc875c2eSYuval Mintz break; 2494cc875c2eSYuval Mintz } 2495cc875c2eSYuval Mintz 2496cc875c2eSYuval Mintz /* Read default link configuration */ 2497cc875c2eSYuval Mintz link = &p_hwfn->mcp_info->link_input; 2498cc875c2eSYuval Mintz port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2499cc875c2eSYuval Mintz offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 2500cc875c2eSYuval Mintz link_temp = qed_rd(p_hwfn, p_ptt, 2501cc875c2eSYuval Mintz port_cfg_addr + 2502cc875c2eSYuval Mintz offsetof(struct nvm_cfg1_port, speed_cap_mask)); 250383aeb933SYuval Mintz link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 250483aeb933SYuval Mintz 
link->speed.advertised_speeds = link_temp; 2505cc875c2eSYuval Mintz 250683aeb933SYuval Mintz link_temp = link->speed.advertised_speeds; 250783aeb933SYuval Mintz p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; 2508cc875c2eSYuval Mintz 2509cc875c2eSYuval Mintz link_temp = qed_rd(p_hwfn, p_ptt, 2510cc875c2eSYuval Mintz port_cfg_addr + 2511cc875c2eSYuval Mintz offsetof(struct nvm_cfg1_port, link_settings)); 2512cc875c2eSYuval Mintz switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 2513cc875c2eSYuval Mintz NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 2514cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 2515cc875c2eSYuval Mintz link->speed.autoneg = true; 2516cc875c2eSYuval Mintz break; 2517cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 2518cc875c2eSYuval Mintz link->speed.forced_speed = 1000; 2519cc875c2eSYuval Mintz break; 2520cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 2521cc875c2eSYuval Mintz link->speed.forced_speed = 10000; 2522cc875c2eSYuval Mintz break; 2523cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 2524cc875c2eSYuval Mintz link->speed.forced_speed = 25000; 2525cc875c2eSYuval Mintz break; 2526cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 2527cc875c2eSYuval Mintz link->speed.forced_speed = 40000; 2528cc875c2eSYuval Mintz break; 2529cc875c2eSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 2530cc875c2eSYuval Mintz link->speed.forced_speed = 50000; 2531cc875c2eSYuval Mintz break; 2532351a4dedSYuval Mintz case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 2533cc875c2eSYuval Mintz link->speed.forced_speed = 100000; 2534cc875c2eSYuval Mintz break; 2535cc875c2eSYuval Mintz default: 25361a635e48SYuval Mintz DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); 2537cc875c2eSYuval Mintz } 2538cc875c2eSYuval Mintz 2539cc875c2eSYuval Mintz link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 2540cc875c2eSYuval Mintz link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 
2541cc875c2eSYuval Mintz link->pause.autoneg = !!(link_temp & 2542cc875c2eSYuval Mintz NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 2543cc875c2eSYuval Mintz link->pause.forced_rx = !!(link_temp & 2544cc875c2eSYuval Mintz NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 2545cc875c2eSYuval Mintz link->pause.forced_tx = !!(link_temp & 2546cc875c2eSYuval Mintz NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 2547cc875c2eSYuval Mintz link->loopback_mode = 0; 2548cc875c2eSYuval Mintz 2549cc875c2eSYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 2550cc875c2eSYuval Mintz "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", 2551cc875c2eSYuval Mintz link->speed.forced_speed, link->speed.advertised_speeds, 2552cc875c2eSYuval Mintz link->speed.autoneg, link->pause.autoneg); 2553cc875c2eSYuval Mintz 2554fe56b9e6SYuval Mintz /* Read Multi-function information from shmem */ 2555fe56b9e6SYuval Mintz addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2556fe56b9e6SYuval Mintz offsetof(struct nvm_cfg1, glob) + 2557fe56b9e6SYuval Mintz offsetof(struct nvm_cfg1_glob, generic_cont0); 2558fe56b9e6SYuval Mintz 2559fe56b9e6SYuval Mintz generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); 2560fe56b9e6SYuval Mintz 2561fe56b9e6SYuval Mintz mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 2562fe56b9e6SYuval Mintz NVM_CFG1_GLOB_MF_MODE_OFFSET; 2563fe56b9e6SYuval Mintz 2564fe56b9e6SYuval Mintz switch (mf_mode) { 2565fe56b9e6SYuval Mintz case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 2566fc48b7a6SYuval Mintz p_hwfn->cdev->mf_mode = QED_MF_OVLAN; 2567fe56b9e6SYuval Mintz break; 2568fe56b9e6SYuval Mintz case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 2569fc48b7a6SYuval Mintz p_hwfn->cdev->mf_mode = QED_MF_NPAR; 2570fe56b9e6SYuval Mintz break; 2571fc48b7a6SYuval Mintz case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 2572fc48b7a6SYuval Mintz p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; 2573fe56b9e6SYuval Mintz break; 2574fe56b9e6SYuval Mintz } 2575fe56b9e6SYuval Mintz DP_INFO(p_hwfn, "Multi function mode is %08x\n", 2576fe56b9e6SYuval Mintz 
p_hwfn->cdev->mf_mode); 2577fe56b9e6SYuval Mintz 2578fc48b7a6SYuval Mintz /* Read Multi-function information from shmem */ 2579fc48b7a6SYuval Mintz addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2580fc48b7a6SYuval Mintz offsetof(struct nvm_cfg1, glob) + 2581fc48b7a6SYuval Mintz offsetof(struct nvm_cfg1_glob, device_capabilities); 2582fc48b7a6SYuval Mintz 2583fc48b7a6SYuval Mintz device_capabilities = qed_rd(p_hwfn, p_ptt, addr); 2584fc48b7a6SYuval Mintz if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 2585fc48b7a6SYuval Mintz __set_bit(QED_DEV_CAP_ETH, 2586fc48b7a6SYuval Mintz &p_hwfn->hw_info.device_capabilities); 25871e128c81SArun Easi if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 25881e128c81SArun Easi __set_bit(QED_DEV_CAP_FCOE, 25891e128c81SArun Easi &p_hwfn->hw_info.device_capabilities); 2590c5ac9319SYuval Mintz if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 2591c5ac9319SYuval Mintz __set_bit(QED_DEV_CAP_ISCSI, 2592c5ac9319SYuval Mintz &p_hwfn->hw_info.device_capabilities); 2593c5ac9319SYuval Mintz if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 2594c5ac9319SYuval Mintz __set_bit(QED_DEV_CAP_ROCE, 2595c5ac9319SYuval Mintz &p_hwfn->hw_info.device_capabilities); 2596fc48b7a6SYuval Mintz 2597fe56b9e6SYuval Mintz return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 2598fe56b9e6SYuval Mintz } 2599fe56b9e6SYuval Mintz 26001408cc1fSYuval Mintz static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 26011408cc1fSYuval Mintz { 2602dbb799c3SYuval Mintz u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 2603dbb799c3SYuval Mintz u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 26049c79ddaaSMintz, Yuval struct qed_dev *cdev = p_hwfn->cdev; 26051408cc1fSYuval Mintz 26069c79ddaaSMintz, Yuval num_funcs = QED_IS_AH(cdev) ? 
MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 26071408cc1fSYuval Mintz 26081408cc1fSYuval Mintz /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 26091408cc1fSYuval Mintz * in the other bits are selected. 26101408cc1fSYuval Mintz * Bits 1-15 are for functions 1-15, respectively, and their value is 26111408cc1fSYuval Mintz * '0' only for enabled functions (function 0 always exists and 26121408cc1fSYuval Mintz * enabled). 26131408cc1fSYuval Mintz * In case of CMT, only the "even" functions are enabled, and thus the 26141408cc1fSYuval Mintz * number of functions for both hwfns is learnt from the same bits. 26151408cc1fSYuval Mintz */ 26161408cc1fSYuval Mintz reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); 26171408cc1fSYuval Mintz 26181408cc1fSYuval Mintz if (reg_function_hide & 0x1) { 26199c79ddaaSMintz, Yuval if (QED_IS_BB(cdev)) { 26209c79ddaaSMintz, Yuval if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { 26211408cc1fSYuval Mintz num_funcs = 0; 26221408cc1fSYuval Mintz eng_mask = 0xaaaa; 26231408cc1fSYuval Mintz } else { 26241408cc1fSYuval Mintz num_funcs = 1; 26251408cc1fSYuval Mintz eng_mask = 0x5554; 26261408cc1fSYuval Mintz } 26279c79ddaaSMintz, Yuval } else { 26289c79ddaaSMintz, Yuval num_funcs = 1; 26299c79ddaaSMintz, Yuval eng_mask = 0xfffe; 26309c79ddaaSMintz, Yuval } 26311408cc1fSYuval Mintz 26321408cc1fSYuval Mintz /* Get the number of the enabled functions on the engine */ 26331408cc1fSYuval Mintz tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 26341408cc1fSYuval Mintz while (tmp) { 26351408cc1fSYuval Mintz if (tmp & 0x1) 26361408cc1fSYuval Mintz num_funcs++; 26371408cc1fSYuval Mintz tmp >>= 0x1; 26381408cc1fSYuval Mintz } 2639dbb799c3SYuval Mintz 2640dbb799c3SYuval Mintz /* Get the PF index within the enabled functions */ 2641dbb799c3SYuval Mintz low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 2642dbb799c3SYuval Mintz tmp = reg_function_hide & eng_mask & low_pfs_mask; 2643dbb799c3SYuval Mintz while (tmp) { 
2644dbb799c3SYuval Mintz if (tmp & 0x1) 2645dbb799c3SYuval Mintz enabled_func_idx--; 2646dbb799c3SYuval Mintz tmp >>= 0x1; 2647dbb799c3SYuval Mintz } 26481408cc1fSYuval Mintz } 26491408cc1fSYuval Mintz 26501408cc1fSYuval Mintz p_hwfn->num_funcs_on_engine = num_funcs; 2651dbb799c3SYuval Mintz p_hwfn->enabled_func_idx = enabled_func_idx; 26521408cc1fSYuval Mintz 26531408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 26541408cc1fSYuval Mintz NETIF_MSG_PROBE, 2655525ef5c0SYuval Mintz "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 26561408cc1fSYuval Mintz p_hwfn->rel_pf_id, 26571408cc1fSYuval Mintz p_hwfn->abs_pf_id, 2658525ef5c0SYuval Mintz p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 26591408cc1fSYuval Mintz } 26601408cc1fSYuval Mintz 26619c79ddaaSMintz, Yuval static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, 26629c79ddaaSMintz, Yuval struct qed_ptt *p_ptt) 2663fe56b9e6SYuval Mintz { 2664fe56b9e6SYuval Mintz u32 port_mode; 2665fe56b9e6SYuval Mintz 26669c79ddaaSMintz, Yuval port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); 2667fe56b9e6SYuval Mintz 2668fe56b9e6SYuval Mintz if (port_mode < 3) { 2669fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines = 1; 2670fe56b9e6SYuval Mintz } else if (port_mode <= 5) { 2671fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines = 2; 2672fe56b9e6SYuval Mintz } else { 2673fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", 2674fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines); 2675fe56b9e6SYuval Mintz 2676fe56b9e6SYuval Mintz /* Default num_ports_in_engines to something */ 2677fe56b9e6SYuval Mintz p_hwfn->cdev->num_ports_in_engines = 1; 2678fe56b9e6SYuval Mintz } 26799c79ddaaSMintz, Yuval } 26809c79ddaaSMintz, Yuval 26819c79ddaaSMintz, Yuval static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, 26829c79ddaaSMintz, Yuval struct qed_ptt *p_ptt) 26839c79ddaaSMintz, Yuval { 26849c79ddaaSMintz, Yuval u32 port; 26859c79ddaaSMintz, 
Yuval int i; 26869c79ddaaSMintz, Yuval 26879c79ddaaSMintz, Yuval p_hwfn->cdev->num_ports_in_engines = 0; 26889c79ddaaSMintz, Yuval 26899c79ddaaSMintz, Yuval for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 26909c79ddaaSMintz, Yuval port = qed_rd(p_hwfn, p_ptt, 26919c79ddaaSMintz, Yuval CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); 26929c79ddaaSMintz, Yuval if (port & 1) 26939c79ddaaSMintz, Yuval p_hwfn->cdev->num_ports_in_engines++; 26949c79ddaaSMintz, Yuval } 26959c79ddaaSMintz, Yuval 26969c79ddaaSMintz, Yuval if (!p_hwfn->cdev->num_ports_in_engines) { 26979c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); 26989c79ddaaSMintz, Yuval 26999c79ddaaSMintz, Yuval /* Default num_ports_in_engine to something */ 27009c79ddaaSMintz, Yuval p_hwfn->cdev->num_ports_in_engines = 1; 27019c79ddaaSMintz, Yuval } 27029c79ddaaSMintz, Yuval } 27039c79ddaaSMintz, Yuval 27049c79ddaaSMintz, Yuval static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 27059c79ddaaSMintz, Yuval { 27069c79ddaaSMintz, Yuval if (QED_IS_BB(p_hwfn->cdev)) 27079c79ddaaSMintz, Yuval qed_hw_info_port_num_bb(p_hwfn, p_ptt); 27089c79ddaaSMintz, Yuval else 27099c79ddaaSMintz, Yuval qed_hw_info_port_num_ah(p_hwfn, p_ptt); 27109c79ddaaSMintz, Yuval } 27119c79ddaaSMintz, Yuval 27129c79ddaaSMintz, Yuval static int 27139c79ddaaSMintz, Yuval qed_get_hw_info(struct qed_hwfn *p_hwfn, 27149c79ddaaSMintz, Yuval struct qed_ptt *p_ptt, 27159c79ddaaSMintz, Yuval enum qed_pci_personality personality) 27169c79ddaaSMintz, Yuval { 27179c79ddaaSMintz, Yuval int rc; 27189c79ddaaSMintz, Yuval 27199c79ddaaSMintz, Yuval /* Since all information is common, only first hwfns should do this */ 27209c79ddaaSMintz, Yuval if (IS_LEAD_HWFN(p_hwfn)) { 27219c79ddaaSMintz, Yuval rc = qed_iov_hw_info(p_hwfn); 27229c79ddaaSMintz, Yuval if (rc) 27239c79ddaaSMintz, Yuval return rc; 27249c79ddaaSMintz, Yuval } 27259c79ddaaSMintz, Yuval 27269c79ddaaSMintz, Yuval qed_hw_info_port_num(p_hwfn, p_ptt); 2727fe56b9e6SYuval 
Mintz 2728fe56b9e6SYuval Mintz qed_hw_get_nvm_info(p_hwfn, p_ptt); 2729fe56b9e6SYuval Mintz 2730fe56b9e6SYuval Mintz rc = qed_int_igu_read_cam(p_hwfn, p_ptt); 2731fe56b9e6SYuval Mintz if (rc) 2732fe56b9e6SYuval Mintz return rc; 2733fe56b9e6SYuval Mintz 2734fe56b9e6SYuval Mintz if (qed_mcp_is_init(p_hwfn)) 2735fe56b9e6SYuval Mintz ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, 2736fe56b9e6SYuval Mintz p_hwfn->mcp_info->func_info.mac); 2737fe56b9e6SYuval Mintz else 2738fe56b9e6SYuval Mintz eth_random_addr(p_hwfn->hw_info.hw_mac_addr); 2739fe56b9e6SYuval Mintz 2740fe56b9e6SYuval Mintz if (qed_mcp_is_init(p_hwfn)) { 2741fe56b9e6SYuval Mintz if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) 2742fe56b9e6SYuval Mintz p_hwfn->hw_info.ovlan = 2743fe56b9e6SYuval Mintz p_hwfn->mcp_info->func_info.ovlan; 2744fe56b9e6SYuval Mintz 2745fe56b9e6SYuval Mintz qed_mcp_cmd_port_init(p_hwfn, p_ptt); 2746fe56b9e6SYuval Mintz } 2747fe56b9e6SYuval Mintz 2748fe56b9e6SYuval Mintz if (qed_mcp_is_init(p_hwfn)) { 2749fe56b9e6SYuval Mintz enum qed_pci_personality protocol; 2750fe56b9e6SYuval Mintz 2751fe56b9e6SYuval Mintz protocol = p_hwfn->mcp_info->func_info.protocol; 2752fe56b9e6SYuval Mintz p_hwfn->hw_info.personality = protocol; 2753fe56b9e6SYuval Mintz } 2754fe56b9e6SYuval Mintz 2755b5a9ee7cSAriel Elior p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 2756b5a9ee7cSAriel Elior p_hwfn->hw_info.num_active_tc = 1; 2757b5a9ee7cSAriel Elior 27581408cc1fSYuval Mintz qed_get_num_funcs(p_hwfn, p_ptt); 27591408cc1fSYuval Mintz 27600fefbfbaSSudarsana Kalluru if (qed_mcp_is_init(p_hwfn)) 27610fefbfbaSSudarsana Kalluru p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 27620fefbfbaSSudarsana Kalluru 27639c8517c4STomer Tayar return qed_hw_get_resc(p_hwfn, p_ptt); 2764fe56b9e6SYuval Mintz } 2765fe56b9e6SYuval Mintz 276615582962SRahul Verma static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2767fe56b9e6SYuval Mintz { 276815582962SRahul Verma struct qed_dev *cdev = 
p_hwfn->cdev; 27699c79ddaaSMintz, Yuval u16 device_id_mask; 2770fe56b9e6SYuval Mintz u32 tmp; 2771fe56b9e6SYuval Mintz 2772fc48b7a6SYuval Mintz /* Read Vendor Id / Device Id */ 27731a635e48SYuval Mintz pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); 27741a635e48SYuval Mintz pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); 27751a635e48SYuval Mintz 27769c79ddaaSMintz, Yuval /* Determine type */ 27779c79ddaaSMintz, Yuval device_id_mask = cdev->device_id & QED_DEV_ID_MASK; 27789c79ddaaSMintz, Yuval switch (device_id_mask) { 27799c79ddaaSMintz, Yuval case QED_DEV_ID_MASK_BB: 27809c79ddaaSMintz, Yuval cdev->type = QED_DEV_TYPE_BB; 27819c79ddaaSMintz, Yuval break; 27829c79ddaaSMintz, Yuval case QED_DEV_ID_MASK_AH: 27839c79ddaaSMintz, Yuval cdev->type = QED_DEV_TYPE_AH; 27849c79ddaaSMintz, Yuval break; 27859c79ddaaSMintz, Yuval default: 27869c79ddaaSMintz, Yuval DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); 27879c79ddaaSMintz, Yuval return -EBUSY; 27889c79ddaaSMintz, Yuval } 27899c79ddaaSMintz, Yuval 279015582962SRahul Verma cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 279115582962SRahul Verma cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 279215582962SRahul Verma 2793fe56b9e6SYuval Mintz MASK_FIELD(CHIP_REV, cdev->chip_rev); 2794fe56b9e6SYuval Mintz 2795fe56b9e6SYuval Mintz /* Learn number of HW-functions */ 279615582962SRahul Verma tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 2797fe56b9e6SYuval Mintz 2798fc48b7a6SYuval Mintz if (tmp & (1 << p_hwfn->rel_pf_id)) { 2799fe56b9e6SYuval Mintz DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); 2800fe56b9e6SYuval Mintz cdev->num_hwfns = 2; 2801fe56b9e6SYuval Mintz } else { 2802fe56b9e6SYuval Mintz cdev->num_hwfns = 1; 2803fe56b9e6SYuval Mintz } 2804fe56b9e6SYuval Mintz 280515582962SRahul Verma cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, 2806fe56b9e6SYuval Mintz MISCS_REG_CHIP_TEST_REG) >> 4; 2807fe56b9e6SYuval Mintz 
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); 280815582962SRahul Verma cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 2809fe56b9e6SYuval Mintz MASK_FIELD(CHIP_METAL, cdev->chip_metal); 2810fe56b9e6SYuval Mintz 2811fe56b9e6SYuval Mintz DP_INFO(cdev->hwfns, 28129c79ddaaSMintz, Yuval "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 28139c79ddaaSMintz, Yuval QED_IS_BB(cdev) ? "BB" : "AH", 28149c79ddaaSMintz, Yuval 'A' + cdev->chip_rev, 28159c79ddaaSMintz, Yuval (int)cdev->chip_metal, 2816fe56b9e6SYuval Mintz cdev->chip_num, cdev->chip_rev, 2817fe56b9e6SYuval Mintz cdev->chip_bond_id, cdev->chip_metal); 281812e09c69SYuval Mintz 281912e09c69SYuval Mintz if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { 282012e09c69SYuval Mintz DP_NOTICE(cdev->hwfns, 282112e09c69SYuval Mintz "The chip type/rev (BB A0) is not supported!\n"); 282212e09c69SYuval Mintz return -EINVAL; 282312e09c69SYuval Mintz } 282412e09c69SYuval Mintz 282512e09c69SYuval Mintz return 0; 2826fe56b9e6SYuval Mintz } 2827fe56b9e6SYuval Mintz 2828fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, 2829fe56b9e6SYuval Mintz void __iomem *p_regview, 2830fe56b9e6SYuval Mintz void __iomem *p_doorbells, 2831fe56b9e6SYuval Mintz enum qed_pci_personality personality) 2832fe56b9e6SYuval Mintz { 2833fe56b9e6SYuval Mintz int rc = 0; 2834fe56b9e6SYuval Mintz 2835fe56b9e6SYuval Mintz /* Split PCI bars evenly between hwfns */ 2836fe56b9e6SYuval Mintz p_hwfn->regview = p_regview; 2837fe56b9e6SYuval Mintz p_hwfn->doorbells = p_doorbells; 2838fe56b9e6SYuval Mintz 28391408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 28401408cc1fSYuval Mintz return qed_vf_hw_prepare(p_hwfn); 28411408cc1fSYuval Mintz 2842fe56b9e6SYuval Mintz /* Validate that chip access is feasible */ 2843fe56b9e6SYuval Mintz if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 2844fe56b9e6SYuval Mintz DP_ERR(p_hwfn, 2845fe56b9e6SYuval Mintz "Reading the ME register returns all Fs; Preventing 
further chip access\n"); 2846fe56b9e6SYuval Mintz return -EINVAL; 2847fe56b9e6SYuval Mintz } 2848fe56b9e6SYuval Mintz 2849fe56b9e6SYuval Mintz get_function_id(p_hwfn); 2850fe56b9e6SYuval Mintz 285112e09c69SYuval Mintz /* Allocate PTT pool */ 285212e09c69SYuval Mintz rc = qed_ptt_pool_alloc(p_hwfn); 28532591c280SJoe Perches if (rc) 2854fe56b9e6SYuval Mintz goto err0; 2855fe56b9e6SYuval Mintz 285612e09c69SYuval Mintz /* Allocate the main PTT */ 285712e09c69SYuval Mintz p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 285812e09c69SYuval Mintz 2859fe56b9e6SYuval Mintz /* First hwfn learns basic information, e.g., number of hwfns */ 286012e09c69SYuval Mintz if (!p_hwfn->my_id) { 286115582962SRahul Verma rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 28621a635e48SYuval Mintz if (rc) 286312e09c69SYuval Mintz goto err1; 286412e09c69SYuval Mintz } 286512e09c69SYuval Mintz 286612e09c69SYuval Mintz qed_hw_hwfn_prepare(p_hwfn); 2867fe56b9e6SYuval Mintz 2868fe56b9e6SYuval Mintz /* Initialize MCP structure */ 2869fe56b9e6SYuval Mintz rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 2870fe56b9e6SYuval Mintz if (rc) { 2871fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); 2872fe56b9e6SYuval Mintz goto err1; 2873fe56b9e6SYuval Mintz } 2874fe56b9e6SYuval Mintz 2875fe56b9e6SYuval Mintz /* Read the device configuration information from the HW and SHMEM */ 2876fe56b9e6SYuval Mintz rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); 2877fe56b9e6SYuval Mintz if (rc) { 2878fe56b9e6SYuval Mintz DP_NOTICE(p_hwfn, "Failed to get HW information\n"); 2879fe56b9e6SYuval Mintz goto err2; 2880fe56b9e6SYuval Mintz } 2881fe56b9e6SYuval Mintz 288218a69e36SMintz, Yuval /* Sending a mailbox to the MFW should be done after qed_get_hw_info() 288318a69e36SMintz, Yuval * is called as it sets the ports number in an engine. 
288418a69e36SMintz, Yuval */ 288518a69e36SMintz, Yuval if (IS_LEAD_HWFN(p_hwfn)) { 288618a69e36SMintz, Yuval rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 288718a69e36SMintz, Yuval if (rc) 288818a69e36SMintz, Yuval DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); 288918a69e36SMintz, Yuval } 289018a69e36SMintz, Yuval 2891fe56b9e6SYuval Mintz /* Allocate the init RT array and initialize the init-ops engine */ 2892fe56b9e6SYuval Mintz rc = qed_init_alloc(p_hwfn); 28932591c280SJoe Perches if (rc) 2894fe56b9e6SYuval Mintz goto err2; 2895fe56b9e6SYuval Mintz 2896fe56b9e6SYuval Mintz return rc; 2897fe56b9e6SYuval Mintz err2: 289832a47e72SYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 289932a47e72SYuval Mintz qed_iov_free_hw_info(p_hwfn->cdev); 2900fe56b9e6SYuval Mintz qed_mcp_free(p_hwfn); 2901fe56b9e6SYuval Mintz err1: 2902fe56b9e6SYuval Mintz qed_hw_hwfn_free(p_hwfn); 2903fe56b9e6SYuval Mintz err0: 2904fe56b9e6SYuval Mintz return rc; 2905fe56b9e6SYuval Mintz } 2906fe56b9e6SYuval Mintz 2907fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev, 2908fe56b9e6SYuval Mintz int personality) 2909fe56b9e6SYuval Mintz { 2910c78df14eSAriel Elior struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2911c78df14eSAriel Elior int rc; 2912fe56b9e6SYuval Mintz 2913fe56b9e6SYuval Mintz /* Store the precompiled init data ptrs */ 29141408cc1fSYuval Mintz if (IS_PF(cdev)) 2915fe56b9e6SYuval Mintz qed_init_iro_array(cdev); 2916fe56b9e6SYuval Mintz 2917fe56b9e6SYuval Mintz /* Initialize the first hwfn - will learn number of hwfns */ 2918c78df14eSAriel Elior rc = qed_hw_prepare_single(p_hwfn, 2919c78df14eSAriel Elior cdev->regview, 2920fe56b9e6SYuval Mintz cdev->doorbells, personality); 2921fe56b9e6SYuval Mintz if (rc) 2922fe56b9e6SYuval Mintz return rc; 2923fe56b9e6SYuval Mintz 2924c78df14eSAriel Elior personality = p_hwfn->hw_info.personality; 2925fe56b9e6SYuval Mintz 2926fe56b9e6SYuval Mintz /* Initialize the rest of the hwfns */ 2927c78df14eSAriel Elior if (cdev->num_hwfns > 1) { 
2928fe56b9e6SYuval Mintz void __iomem *p_regview, *p_doorbell; 2929c78df14eSAriel Elior u8 __iomem *addr; 2930fe56b9e6SYuval Mintz 2931c78df14eSAriel Elior /* adjust bar offset for second engine */ 293215582962SRahul Verma addr = cdev->regview + 293315582962SRahul Verma qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 293415582962SRahul Verma BAR_ID_0) / 2; 2935c78df14eSAriel Elior p_regview = addr; 2936c78df14eSAriel Elior 293715582962SRahul Verma addr = cdev->doorbells + 293815582962SRahul Verma qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 293915582962SRahul Verma BAR_ID_1) / 2; 2940c78df14eSAriel Elior p_doorbell = addr; 2941c78df14eSAriel Elior 2942c78df14eSAriel Elior /* prepare second hw function */ 2943c78df14eSAriel Elior rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, 2944fe56b9e6SYuval Mintz p_doorbell, personality); 2945c78df14eSAriel Elior 2946c78df14eSAriel Elior /* in case of error, need to free the previously 2947c78df14eSAriel Elior * initiliazed hwfn 0. 2948c78df14eSAriel Elior */ 2949fe56b9e6SYuval Mintz if (rc) { 29501408cc1fSYuval Mintz if (IS_PF(cdev)) { 2951c78df14eSAriel Elior qed_init_free(p_hwfn); 2952c78df14eSAriel Elior qed_mcp_free(p_hwfn); 2953c78df14eSAriel Elior qed_hw_hwfn_free(p_hwfn); 2954fe56b9e6SYuval Mintz } 2955fe56b9e6SYuval Mintz } 29561408cc1fSYuval Mintz } 2957fe56b9e6SYuval Mintz 2958c78df14eSAriel Elior return rc; 2959fe56b9e6SYuval Mintz } 2960fe56b9e6SYuval Mintz 2961fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev) 2962fe56b9e6SYuval Mintz { 29630fefbfbaSSudarsana Kalluru struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2964fe56b9e6SYuval Mintz int i; 2965fe56b9e6SYuval Mintz 29660fefbfbaSSudarsana Kalluru if (IS_PF(cdev)) 29670fefbfbaSSudarsana Kalluru qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 29680fefbfbaSSudarsana Kalluru QED_OV_DRIVER_STATE_NOT_LOADED); 29690fefbfbaSSudarsana Kalluru 2970fe56b9e6SYuval Mintz for_each_hwfn(cdev, i) { 2971fe56b9e6SYuval Mintz struct qed_hwfn *p_hwfn = 
&cdev->hwfns[i]; 2972fe56b9e6SYuval Mintz 29731408cc1fSYuval Mintz if (IS_VF(cdev)) { 29740b55e27dSYuval Mintz qed_vf_pf_release(p_hwfn); 29751408cc1fSYuval Mintz continue; 29761408cc1fSYuval Mintz } 29771408cc1fSYuval Mintz 2978fe56b9e6SYuval Mintz qed_init_free(p_hwfn); 2979fe56b9e6SYuval Mintz qed_hw_hwfn_free(p_hwfn); 2980fe56b9e6SYuval Mintz qed_mcp_free(p_hwfn); 2981fe56b9e6SYuval Mintz } 298232a47e72SYuval Mintz 298332a47e72SYuval Mintz qed_iov_free_hw_info(cdev); 2984fe56b9e6SYuval Mintz } 2985fe56b9e6SYuval Mintz 2986a91eb52aSYuval Mintz static void qed_chain_free_next_ptr(struct qed_dev *cdev, 2987a91eb52aSYuval Mintz struct qed_chain *p_chain) 2988a91eb52aSYuval Mintz { 2989a91eb52aSYuval Mintz void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; 2990a91eb52aSYuval Mintz dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 2991a91eb52aSYuval Mintz struct qed_chain_next *p_next; 2992a91eb52aSYuval Mintz u32 size, i; 2993a91eb52aSYuval Mintz 2994a91eb52aSYuval Mintz if (!p_virt) 2995a91eb52aSYuval Mintz return; 2996a91eb52aSYuval Mintz 2997a91eb52aSYuval Mintz size = p_chain->elem_size * p_chain->usable_per_page; 2998a91eb52aSYuval Mintz 2999a91eb52aSYuval Mintz for (i = 0; i < p_chain->page_cnt; i++) { 3000a91eb52aSYuval Mintz if (!p_virt) 3001a91eb52aSYuval Mintz break; 3002a91eb52aSYuval Mintz 3003a91eb52aSYuval Mintz p_next = (struct qed_chain_next *)((u8 *)p_virt + size); 3004a91eb52aSYuval Mintz p_virt_next = p_next->next_virt; 3005a91eb52aSYuval Mintz p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 3006a91eb52aSYuval Mintz 3007a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3008a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, p_virt, p_phys); 3009a91eb52aSYuval Mintz 3010a91eb52aSYuval Mintz p_virt = p_virt_next; 3011a91eb52aSYuval Mintz p_phys = p_phys_next; 3012a91eb52aSYuval Mintz } 3013a91eb52aSYuval Mintz } 3014a91eb52aSYuval Mintz 3015a91eb52aSYuval Mintz static void qed_chain_free_single(struct qed_dev *cdev, 
3016a91eb52aSYuval Mintz struct qed_chain *p_chain) 3017a91eb52aSYuval Mintz { 3018a91eb52aSYuval Mintz if (!p_chain->p_virt_addr) 3019a91eb52aSYuval Mintz return; 3020a91eb52aSYuval Mintz 3021a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3022a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3023a91eb52aSYuval Mintz p_chain->p_virt_addr, p_chain->p_phys_addr); 3024a91eb52aSYuval Mintz } 3025a91eb52aSYuval Mintz 3026a91eb52aSYuval Mintz static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 3027a91eb52aSYuval Mintz { 3028a91eb52aSYuval Mintz void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 3029a91eb52aSYuval Mintz u32 page_cnt = p_chain->page_cnt, i, pbl_size; 30306d937acfSMintz, Yuval u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; 3031a91eb52aSYuval Mintz 3032a91eb52aSYuval Mintz if (!pp_virt_addr_tbl) 3033a91eb52aSYuval Mintz return; 3034a91eb52aSYuval Mintz 30356d937acfSMintz, Yuval if (!p_pbl_virt) 3036a91eb52aSYuval Mintz goto out; 3037a91eb52aSYuval Mintz 3038a91eb52aSYuval Mintz for (i = 0; i < page_cnt; i++) { 3039a91eb52aSYuval Mintz if (!pp_virt_addr_tbl[i]) 3040a91eb52aSYuval Mintz break; 3041a91eb52aSYuval Mintz 3042a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3043a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3044a91eb52aSYuval Mintz pp_virt_addr_tbl[i], 3045a91eb52aSYuval Mintz *(dma_addr_t *)p_pbl_virt); 3046a91eb52aSYuval Mintz 3047a91eb52aSYuval Mintz p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 3048a91eb52aSYuval Mintz } 3049a91eb52aSYuval Mintz 3050a91eb52aSYuval Mintz pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 3051a91eb52aSYuval Mintz dma_free_coherent(&cdev->pdev->dev, 3052a91eb52aSYuval Mintz pbl_size, 30536d937acfSMintz, Yuval p_chain->pbl_sp.p_virt_table, 30546d937acfSMintz, Yuval p_chain->pbl_sp.p_phys_table); 3055a91eb52aSYuval Mintz out: 3056a91eb52aSYuval Mintz vfree(p_chain->pbl.pp_virt_addr_tbl); 3057a91eb52aSYuval Mintz } 3058a91eb52aSYuval Mintz 3059a91eb52aSYuval Mintz void 
qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) 3060a91eb52aSYuval Mintz { 3061a91eb52aSYuval Mintz switch (p_chain->mode) { 3062a91eb52aSYuval Mintz case QED_CHAIN_MODE_NEXT_PTR: 3063a91eb52aSYuval Mintz qed_chain_free_next_ptr(cdev, p_chain); 3064a91eb52aSYuval Mintz break; 3065a91eb52aSYuval Mintz case QED_CHAIN_MODE_SINGLE: 3066a91eb52aSYuval Mintz qed_chain_free_single(cdev, p_chain); 3067a91eb52aSYuval Mintz break; 3068a91eb52aSYuval Mintz case QED_CHAIN_MODE_PBL: 3069a91eb52aSYuval Mintz qed_chain_free_pbl(cdev, p_chain); 3070a91eb52aSYuval Mintz break; 3071a91eb52aSYuval Mintz } 3072a91eb52aSYuval Mintz } 3073a91eb52aSYuval Mintz 3074a91eb52aSYuval Mintz static int 3075a91eb52aSYuval Mintz qed_chain_alloc_sanity_check(struct qed_dev *cdev, 3076a91eb52aSYuval Mintz enum qed_chain_cnt_type cnt_type, 3077a91eb52aSYuval Mintz size_t elem_size, u32 page_cnt) 3078a91eb52aSYuval Mintz { 3079a91eb52aSYuval Mintz u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 3080a91eb52aSYuval Mintz 3081a91eb52aSYuval Mintz /* The actual chain size can be larger than the maximal possible value 3082a91eb52aSYuval Mintz * after rounding up the requested elements number to pages, and after 3083a91eb52aSYuval Mintz * taking into acount the unusuable elements (next-ptr elements). 3084a91eb52aSYuval Mintz * The size of a "u16" chain can be (U16_MAX + 1) since the chain 3085a91eb52aSYuval Mintz * size/capacity fields are of a u32 type. 
3086a91eb52aSYuval Mintz */ 3087a91eb52aSYuval Mintz if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && 30883ef310a7STomer Tayar chain_size > ((u32)U16_MAX + 1)) || 30893ef310a7STomer Tayar (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { 3090a91eb52aSYuval Mintz DP_NOTICE(cdev, 3091a91eb52aSYuval Mintz "The actual chain size (0x%llx) is larger than the maximal possible value\n", 3092a91eb52aSYuval Mintz chain_size); 3093a91eb52aSYuval Mintz return -EINVAL; 3094a91eb52aSYuval Mintz } 3095a91eb52aSYuval Mintz 3096a91eb52aSYuval Mintz return 0; 3097a91eb52aSYuval Mintz } 3098a91eb52aSYuval Mintz 3099a91eb52aSYuval Mintz static int 3100a91eb52aSYuval Mintz qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) 3101a91eb52aSYuval Mintz { 3102a91eb52aSYuval Mintz void *p_virt = NULL, *p_virt_prev = NULL; 3103a91eb52aSYuval Mintz dma_addr_t p_phys = 0; 3104a91eb52aSYuval Mintz u32 i; 3105a91eb52aSYuval Mintz 3106a91eb52aSYuval Mintz for (i = 0; i < p_chain->page_cnt; i++) { 3107a91eb52aSYuval Mintz p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3108a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3109a91eb52aSYuval Mintz &p_phys, GFP_KERNEL); 31102591c280SJoe Perches if (!p_virt) 3111a91eb52aSYuval Mintz return -ENOMEM; 3112a91eb52aSYuval Mintz 3113a91eb52aSYuval Mintz if (i == 0) { 3114a91eb52aSYuval Mintz qed_chain_init_mem(p_chain, p_virt, p_phys); 3115a91eb52aSYuval Mintz qed_chain_reset(p_chain); 3116a91eb52aSYuval Mintz } else { 3117a91eb52aSYuval Mintz qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 3118a91eb52aSYuval Mintz p_virt, p_phys); 3119a91eb52aSYuval Mintz } 3120a91eb52aSYuval Mintz 3121a91eb52aSYuval Mintz p_virt_prev = p_virt; 3122a91eb52aSYuval Mintz } 3123a91eb52aSYuval Mintz /* Last page's next element should point to the beginning of the 3124a91eb52aSYuval Mintz * chain. 
3125a91eb52aSYuval Mintz */ 3126a91eb52aSYuval Mintz qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 3127a91eb52aSYuval Mintz p_chain->p_virt_addr, 3128a91eb52aSYuval Mintz p_chain->p_phys_addr); 3129a91eb52aSYuval Mintz 3130a91eb52aSYuval Mintz return 0; 3131a91eb52aSYuval Mintz } 3132a91eb52aSYuval Mintz 3133a91eb52aSYuval Mintz static int 3134a91eb52aSYuval Mintz qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) 3135a91eb52aSYuval Mintz { 3136a91eb52aSYuval Mintz dma_addr_t p_phys = 0; 3137a91eb52aSYuval Mintz void *p_virt = NULL; 3138a91eb52aSYuval Mintz 3139a91eb52aSYuval Mintz p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3140a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); 31412591c280SJoe Perches if (!p_virt) 3142a91eb52aSYuval Mintz return -ENOMEM; 3143a91eb52aSYuval Mintz 3144a91eb52aSYuval Mintz qed_chain_init_mem(p_chain, p_virt, p_phys); 3145a91eb52aSYuval Mintz qed_chain_reset(p_chain); 3146a91eb52aSYuval Mintz 3147a91eb52aSYuval Mintz return 0; 3148a91eb52aSYuval Mintz } 3149a91eb52aSYuval Mintz 3150a91eb52aSYuval Mintz static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 3151a91eb52aSYuval Mintz { 3152a91eb52aSYuval Mintz u32 page_cnt = p_chain->page_cnt, size, i; 3153a91eb52aSYuval Mintz dma_addr_t p_phys = 0, p_pbl_phys = 0; 3154a91eb52aSYuval Mintz void **pp_virt_addr_tbl = NULL; 3155a91eb52aSYuval Mintz u8 *p_pbl_virt = NULL; 3156a91eb52aSYuval Mintz void *p_virt = NULL; 3157a91eb52aSYuval Mintz 3158a91eb52aSYuval Mintz size = page_cnt * sizeof(*pp_virt_addr_tbl); 31592591c280SJoe Perches pp_virt_addr_tbl = vzalloc(size); 31602591c280SJoe Perches if (!pp_virt_addr_tbl) 3161a91eb52aSYuval Mintz return -ENOMEM; 3162a91eb52aSYuval Mintz 3163a91eb52aSYuval Mintz /* The allocation of the PBL table is done with its full size, since it 3164a91eb52aSYuval Mintz * is expected to be successive. 
3165a91eb52aSYuval Mintz * qed_chain_init_pbl_mem() is called even in a case of an allocation 3166a91eb52aSYuval Mintz * failure, since pp_virt_addr_tbl was previously allocated, and it 3167a91eb52aSYuval Mintz * should be saved to allow its freeing during the error flow. 3168a91eb52aSYuval Mintz */ 3169a91eb52aSYuval Mintz size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 3170a91eb52aSYuval Mintz p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, 3171a91eb52aSYuval Mintz size, &p_pbl_phys, GFP_KERNEL); 3172a91eb52aSYuval Mintz qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 3173a91eb52aSYuval Mintz pp_virt_addr_tbl); 31742591c280SJoe Perches if (!p_pbl_virt) 3175a91eb52aSYuval Mintz return -ENOMEM; 3176a91eb52aSYuval Mintz 3177a91eb52aSYuval Mintz for (i = 0; i < page_cnt; i++) { 3178a91eb52aSYuval Mintz p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3179a91eb52aSYuval Mintz QED_CHAIN_PAGE_SIZE, 3180a91eb52aSYuval Mintz &p_phys, GFP_KERNEL); 31812591c280SJoe Perches if (!p_virt) 3182a91eb52aSYuval Mintz return -ENOMEM; 3183a91eb52aSYuval Mintz 3184a91eb52aSYuval Mintz if (i == 0) { 3185a91eb52aSYuval Mintz qed_chain_init_mem(p_chain, p_virt, p_phys); 3186a91eb52aSYuval Mintz qed_chain_reset(p_chain); 3187a91eb52aSYuval Mintz } 3188a91eb52aSYuval Mintz 3189a91eb52aSYuval Mintz /* Fill the PBL table with the physical address of the page */ 3190a91eb52aSYuval Mintz *(dma_addr_t *)p_pbl_virt = p_phys; 3191a91eb52aSYuval Mintz /* Keep the virtual address of the page */ 3192a91eb52aSYuval Mintz p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 3193a91eb52aSYuval Mintz 3194a91eb52aSYuval Mintz p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 3195a91eb52aSYuval Mintz } 3196a91eb52aSYuval Mintz 3197a91eb52aSYuval Mintz return 0; 3198a91eb52aSYuval Mintz } 3199a91eb52aSYuval Mintz 3200fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev, 3201fe56b9e6SYuval Mintz enum qed_chain_use_mode intended_use, 3202fe56b9e6SYuval Mintz enum qed_chain_mode mode, 3203a91eb52aSYuval Mintz enum 
qed_chain_cnt_type cnt_type, 3204a91eb52aSYuval Mintz u32 num_elems, size_t elem_size, struct qed_chain *p_chain) 3205fe56b9e6SYuval Mintz { 3206a91eb52aSYuval Mintz u32 page_cnt; 3207a91eb52aSYuval Mintz int rc = 0; 3208fe56b9e6SYuval Mintz 3209fe56b9e6SYuval Mintz if (mode == QED_CHAIN_MODE_SINGLE) 3210fe56b9e6SYuval Mintz page_cnt = 1; 3211fe56b9e6SYuval Mintz else 3212fe56b9e6SYuval Mintz page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 3213fe56b9e6SYuval Mintz 3214a91eb52aSYuval Mintz rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); 3215a91eb52aSYuval Mintz if (rc) { 3216a91eb52aSYuval Mintz DP_NOTICE(cdev, 32172591c280SJoe Perches "Cannot allocate a chain with the given arguments:\n"); 32182591c280SJoe Perches DP_NOTICE(cdev, 3219a91eb52aSYuval Mintz "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 3220a91eb52aSYuval Mintz intended_use, mode, cnt_type, num_elems, elem_size); 3221a91eb52aSYuval Mintz return rc; 3222fe56b9e6SYuval Mintz } 3223fe56b9e6SYuval Mintz 3224a91eb52aSYuval Mintz qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, 3225a91eb52aSYuval Mintz mode, cnt_type); 3226fe56b9e6SYuval Mintz 3227a91eb52aSYuval Mintz switch (mode) { 3228a91eb52aSYuval Mintz case QED_CHAIN_MODE_NEXT_PTR: 3229a91eb52aSYuval Mintz rc = qed_chain_alloc_next_ptr(cdev, p_chain); 3230a91eb52aSYuval Mintz break; 3231a91eb52aSYuval Mintz case QED_CHAIN_MODE_SINGLE: 3232a91eb52aSYuval Mintz rc = qed_chain_alloc_single(cdev, p_chain); 3233a91eb52aSYuval Mintz break; 3234a91eb52aSYuval Mintz case QED_CHAIN_MODE_PBL: 3235a91eb52aSYuval Mintz rc = qed_chain_alloc_pbl(cdev, p_chain); 3236a91eb52aSYuval Mintz break; 3237fe56b9e6SYuval Mintz } 3238a91eb52aSYuval Mintz if (rc) 3239a91eb52aSYuval Mintz goto nomem; 3240fe56b9e6SYuval Mintz 3241fe56b9e6SYuval Mintz return 0; 3242fe56b9e6SYuval Mintz 3243fe56b9e6SYuval Mintz nomem: 3244a91eb52aSYuval Mintz qed_chain_free(cdev, p_chain); 3245a91eb52aSYuval Mintz 
return rc; 3246fe56b9e6SYuval Mintz } 3247fe56b9e6SYuval Mintz 3248a91eb52aSYuval Mintz int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) 3249cee4d264SManish Chopra { 3250cee4d264SManish Chopra if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { 3251cee4d264SManish Chopra u16 min, max; 3252cee4d264SManish Chopra 3253cee4d264SManish Chopra min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); 3254cee4d264SManish Chopra max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); 3255cee4d264SManish Chopra DP_NOTICE(p_hwfn, 3256cee4d264SManish Chopra "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 3257cee4d264SManish Chopra src_id, min, max); 3258cee4d264SManish Chopra 3259cee4d264SManish Chopra return -EINVAL; 3260cee4d264SManish Chopra } 3261cee4d264SManish Chopra 3262cee4d264SManish Chopra *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; 3263cee4d264SManish Chopra 3264cee4d264SManish Chopra return 0; 3265cee4d264SManish Chopra } 3266cee4d264SManish Chopra 32671a635e48SYuval Mintz int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) 3268cee4d264SManish Chopra { 3269cee4d264SManish Chopra if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { 3270cee4d264SManish Chopra u8 min, max; 3271cee4d264SManish Chopra 3272cee4d264SManish Chopra min = (u8)RESC_START(p_hwfn, QED_VPORT); 3273cee4d264SManish Chopra max = min + RESC_NUM(p_hwfn, QED_VPORT); 3274cee4d264SManish Chopra DP_NOTICE(p_hwfn, 3275cee4d264SManish Chopra "vport id [%d] is not valid, available indices [%d - %d]\n", 3276cee4d264SManish Chopra src_id, min, max); 3277cee4d264SManish Chopra 3278cee4d264SManish Chopra return -EINVAL; 3279cee4d264SManish Chopra } 3280cee4d264SManish Chopra 3281cee4d264SManish Chopra *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; 3282cee4d264SManish Chopra 3283cee4d264SManish Chopra return 0; 3284cee4d264SManish Chopra } 3285cee4d264SManish Chopra 32861a635e48SYuval Mintz int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) 3287cee4d264SManish Chopra { 
3288cee4d264SManish Chopra if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { 3289cee4d264SManish Chopra u8 min, max; 3290cee4d264SManish Chopra 3291cee4d264SManish Chopra min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); 3292cee4d264SManish Chopra max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); 3293cee4d264SManish Chopra DP_NOTICE(p_hwfn, 3294cee4d264SManish Chopra "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 3295cee4d264SManish Chopra src_id, min, max); 3296cee4d264SManish Chopra 3297cee4d264SManish Chopra return -EINVAL; 3298cee4d264SManish Chopra } 3299cee4d264SManish Chopra 3300cee4d264SManish Chopra *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; 3301cee4d264SManish Chopra 3302cee4d264SManish Chopra return 0; 3303cee4d264SManish Chopra } 3304bcd197c8SManish Chopra 33050a7fb11cSYuval Mintz static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low, 33060a7fb11cSYuval Mintz u8 *p_filter) 33070a7fb11cSYuval Mintz { 33080a7fb11cSYuval Mintz *p_high = p_filter[1] | (p_filter[0] << 8); 33090a7fb11cSYuval Mintz *p_low = p_filter[5] | (p_filter[4] << 8) | 33100a7fb11cSYuval Mintz (p_filter[3] << 16) | (p_filter[2] << 24); 33110a7fb11cSYuval Mintz } 33120a7fb11cSYuval Mintz 33130a7fb11cSYuval Mintz int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, 33140a7fb11cSYuval Mintz struct qed_ptt *p_ptt, u8 *p_filter) 33150a7fb11cSYuval Mintz { 33160a7fb11cSYuval Mintz u32 high = 0, low = 0, en; 33170a7fb11cSYuval Mintz int i; 33180a7fb11cSYuval Mintz 33190a7fb11cSYuval Mintz if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 33200a7fb11cSYuval Mintz return 0; 33210a7fb11cSYuval Mintz 33220a7fb11cSYuval Mintz qed_llh_mac_to_filter(&high, &low, p_filter); 33230a7fb11cSYuval Mintz 33240a7fb11cSYuval Mintz /* Find a free entry and utilize it */ 33250a7fb11cSYuval Mintz for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 33260a7fb11cSYuval Mintz en = qed_rd(p_hwfn, p_ptt, 33270a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); 33280a7fb11cSYuval Mintz if 
(en) 33290a7fb11cSYuval Mintz continue; 33300a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33310a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_VALUE + 33320a7fb11cSYuval Mintz 2 * i * sizeof(u32), low); 33330a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33340a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_VALUE + 33350a7fb11cSYuval Mintz (2 * i + 1) * sizeof(u32), high); 33360a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33370a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); 33380a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33390a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 33400a7fb11cSYuval Mintz i * sizeof(u32), 0); 33410a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33420a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); 33430a7fb11cSYuval Mintz break; 33440a7fb11cSYuval Mintz } 33450a7fb11cSYuval Mintz if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { 33460a7fb11cSYuval Mintz DP_NOTICE(p_hwfn, 33470a7fb11cSYuval Mintz "Failed to find an empty LLH filter to utilize\n"); 33480a7fb11cSYuval Mintz return -EINVAL; 33490a7fb11cSYuval Mintz } 33500a7fb11cSYuval Mintz 33510a7fb11cSYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 33520a7fb11cSYuval Mintz "mac: %pM is added at %d\n", 33530a7fb11cSYuval Mintz p_filter, i); 33540a7fb11cSYuval Mintz 33550a7fb11cSYuval Mintz return 0; 33560a7fb11cSYuval Mintz } 33570a7fb11cSYuval Mintz 33580a7fb11cSYuval Mintz void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, 33590a7fb11cSYuval Mintz struct qed_ptt *p_ptt, u8 *p_filter) 33600a7fb11cSYuval Mintz { 33610a7fb11cSYuval Mintz u32 high = 0, low = 0; 33620a7fb11cSYuval Mintz int i; 33630a7fb11cSYuval Mintz 33640a7fb11cSYuval Mintz if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 33650a7fb11cSYuval Mintz return; 33660a7fb11cSYuval Mintz 33670a7fb11cSYuval Mintz qed_llh_mac_to_filter(&high, &low, p_filter); 33680a7fb11cSYuval Mintz 33690a7fb11cSYuval Mintz /* Find the entry and clean it */ 33700a7fb11cSYuval Mintz for (i = 0; i < 
NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 33710a7fb11cSYuval Mintz if (qed_rd(p_hwfn, p_ptt, 33720a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_VALUE + 33730a7fb11cSYuval Mintz 2 * i * sizeof(u32)) != low) 33740a7fb11cSYuval Mintz continue; 33750a7fb11cSYuval Mintz if (qed_rd(p_hwfn, p_ptt, 33760a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_VALUE + 33770a7fb11cSYuval Mintz (2 * i + 1) * sizeof(u32)) != high) 33780a7fb11cSYuval Mintz continue; 33790a7fb11cSYuval Mintz 33800a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33810a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); 33820a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33830a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); 33840a7fb11cSYuval Mintz qed_wr(p_hwfn, p_ptt, 33850a7fb11cSYuval Mintz NIG_REG_LLH_FUNC_FILTER_VALUE + 33860a7fb11cSYuval Mintz (2 * i + 1) * sizeof(u32), 0); 33870a7fb11cSYuval Mintz 33880a7fb11cSYuval Mintz DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 33890a7fb11cSYuval Mintz "mac: %pM is removed from %d\n", 33900a7fb11cSYuval Mintz p_filter, i); 33910a7fb11cSYuval Mintz break; 33920a7fb11cSYuval Mintz } 33930a7fb11cSYuval Mintz if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 33940a7fb11cSYuval Mintz DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); 33950a7fb11cSYuval Mintz } 33960a7fb11cSYuval Mintz 33971e128c81SArun Easi int 33981e128c81SArun Easi qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, 33991e128c81SArun Easi struct qed_ptt *p_ptt, 34001e128c81SArun Easi u16 source_port_or_eth_type, 34011e128c81SArun Easi u16 dest_port, enum qed_llh_port_filter_type_t type) 34021e128c81SArun Easi { 34031e128c81SArun Easi u32 high = 0, low = 0, en; 34041e128c81SArun Easi int i; 34051e128c81SArun Easi 34061e128c81SArun Easi if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 34071e128c81SArun Easi return 0; 34081e128c81SArun Easi 34091e128c81SArun Easi switch (type) { 34101e128c81SArun Easi case QED_LLH_FILTER_ETHERTYPE: 34111e128c81SArun Easi high = 
source_port_or_eth_type; 34121e128c81SArun Easi break; 34131e128c81SArun Easi case QED_LLH_FILTER_TCP_SRC_PORT: 34141e128c81SArun Easi case QED_LLH_FILTER_UDP_SRC_PORT: 34151e128c81SArun Easi low = source_port_or_eth_type << 16; 34161e128c81SArun Easi break; 34171e128c81SArun Easi case QED_LLH_FILTER_TCP_DEST_PORT: 34181e128c81SArun Easi case QED_LLH_FILTER_UDP_DEST_PORT: 34191e128c81SArun Easi low = dest_port; 34201e128c81SArun Easi break; 34211e128c81SArun Easi case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 34221e128c81SArun Easi case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 34231e128c81SArun Easi low = (source_port_or_eth_type << 16) | dest_port; 34241e128c81SArun Easi break; 34251e128c81SArun Easi default: 34261e128c81SArun Easi DP_NOTICE(p_hwfn, 34271e128c81SArun Easi "Non valid LLH protocol filter type %d\n", type); 34281e128c81SArun Easi return -EINVAL; 34291e128c81SArun Easi } 34301e128c81SArun Easi /* Find a free entry and utilize it */ 34311e128c81SArun Easi for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 34321e128c81SArun Easi en = qed_rd(p_hwfn, p_ptt, 34331e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); 34341e128c81SArun Easi if (en) 34351e128c81SArun Easi continue; 34361e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 34371e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_VALUE + 34381e128c81SArun Easi 2 * i * sizeof(u32), low); 34391e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 34401e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_VALUE + 34411e128c81SArun Easi (2 * i + 1) * sizeof(u32), high); 34421e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 34431e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1); 34441e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 34451e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 34461e128c81SArun Easi i * sizeof(u32), 1 << type); 34471e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 34481e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); 34491e128c81SArun Easi break; 34501e128c81SArun Easi } 34511e128c81SArun Easi 
if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { 34521e128c81SArun Easi DP_NOTICE(p_hwfn, 34531e128c81SArun Easi "Failed to find an empty LLH filter to utilize\n"); 34541e128c81SArun Easi return -EINVAL; 34551e128c81SArun Easi } 34561e128c81SArun Easi switch (type) { 34571e128c81SArun Easi case QED_LLH_FILTER_ETHERTYPE: 34581e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34591e128c81SArun Easi "ETH type %x is added at %d\n", 34601e128c81SArun Easi source_port_or_eth_type, i); 34611e128c81SArun Easi break; 34621e128c81SArun Easi case QED_LLH_FILTER_TCP_SRC_PORT: 34631e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34641e128c81SArun Easi "TCP src port %x is added at %d\n", 34651e128c81SArun Easi source_port_or_eth_type, i); 34661e128c81SArun Easi break; 34671e128c81SArun Easi case QED_LLH_FILTER_UDP_SRC_PORT: 34681e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34691e128c81SArun Easi "UDP src port %x is added at %d\n", 34701e128c81SArun Easi source_port_or_eth_type, i); 34711e128c81SArun Easi break; 34721e128c81SArun Easi case QED_LLH_FILTER_TCP_DEST_PORT: 34731e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34741e128c81SArun Easi "TCP dst port %x is added at %d\n", dest_port, i); 34751e128c81SArun Easi break; 34761e128c81SArun Easi case QED_LLH_FILTER_UDP_DEST_PORT: 34771e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34781e128c81SArun Easi "UDP dst port %x is added at %d\n", dest_port, i); 34791e128c81SArun Easi break; 34801e128c81SArun Easi case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 34811e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34821e128c81SArun Easi "TCP src/dst ports %x/%x are added at %d\n", 34831e128c81SArun Easi source_port_or_eth_type, dest_port, i); 34841e128c81SArun Easi break; 34851e128c81SArun Easi case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 34861e128c81SArun Easi DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 34871e128c81SArun Easi "UDP src/dst ports %x/%x are added at %d\n", 34881e128c81SArun Easi source_port_or_eth_type, dest_port, i); 
34891e128c81SArun Easi break; 34901e128c81SArun Easi } 34911e128c81SArun Easi return 0; 34921e128c81SArun Easi } 34931e128c81SArun Easi 34941e128c81SArun Easi void 34951e128c81SArun Easi qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, 34961e128c81SArun Easi struct qed_ptt *p_ptt, 34971e128c81SArun Easi u16 source_port_or_eth_type, 34981e128c81SArun Easi u16 dest_port, 34991e128c81SArun Easi enum qed_llh_port_filter_type_t type) 35001e128c81SArun Easi { 35011e128c81SArun Easi u32 high = 0, low = 0; 35021e128c81SArun Easi int i; 35031e128c81SArun Easi 35041e128c81SArun Easi if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 35051e128c81SArun Easi return; 35061e128c81SArun Easi 35071e128c81SArun Easi switch (type) { 35081e128c81SArun Easi case QED_LLH_FILTER_ETHERTYPE: 35091e128c81SArun Easi high = source_port_or_eth_type; 35101e128c81SArun Easi break; 35111e128c81SArun Easi case QED_LLH_FILTER_TCP_SRC_PORT: 35121e128c81SArun Easi case QED_LLH_FILTER_UDP_SRC_PORT: 35131e128c81SArun Easi low = source_port_or_eth_type << 16; 35141e128c81SArun Easi break; 35151e128c81SArun Easi case QED_LLH_FILTER_TCP_DEST_PORT: 35161e128c81SArun Easi case QED_LLH_FILTER_UDP_DEST_PORT: 35171e128c81SArun Easi low = dest_port; 35181e128c81SArun Easi break; 35191e128c81SArun Easi case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 35201e128c81SArun Easi case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 35211e128c81SArun Easi low = (source_port_or_eth_type << 16) | dest_port; 35221e128c81SArun Easi break; 35231e128c81SArun Easi default: 35241e128c81SArun Easi DP_NOTICE(p_hwfn, 35251e128c81SArun Easi "Non valid LLH protocol filter type %d\n", type); 35261e128c81SArun Easi return; 35271e128c81SArun Easi } 35281e128c81SArun Easi 35291e128c81SArun Easi for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 35301e128c81SArun Easi if (!qed_rd(p_hwfn, p_ptt, 35311e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32))) 35321e128c81SArun Easi continue; 35331e128c81SArun Easi if (!qed_rd(p_hwfn, 
p_ptt, 35341e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32))) 35351e128c81SArun Easi continue; 35361e128c81SArun Easi if (!(qed_rd(p_hwfn, p_ptt, 35371e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 35381e128c81SArun Easi i * sizeof(u32)) & BIT(type))) 35391e128c81SArun Easi continue; 35401e128c81SArun Easi if (qed_rd(p_hwfn, p_ptt, 35411e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_VALUE + 35421e128c81SArun Easi 2 * i * sizeof(u32)) != low) 35431e128c81SArun Easi continue; 35441e128c81SArun Easi if (qed_rd(p_hwfn, p_ptt, 35451e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_VALUE + 35461e128c81SArun Easi (2 * i + 1) * sizeof(u32)) != high) 35471e128c81SArun Easi continue; 35481e128c81SArun Easi 35491e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 35501e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); 35511e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 35521e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); 35531e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 35541e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 35551e128c81SArun Easi i * sizeof(u32), 0); 35561e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 35571e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); 35581e128c81SArun Easi qed_wr(p_hwfn, p_ptt, 35591e128c81SArun Easi NIG_REG_LLH_FUNC_FILTER_VALUE + 35601e128c81SArun Easi (2 * i + 1) * sizeof(u32), 0); 35611e128c81SArun Easi break; 35621e128c81SArun Easi } 35631e128c81SArun Easi 35641e128c81SArun Easi if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 35651e128c81SArun Easi DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); 35661e128c81SArun Easi } 35671e128c81SArun Easi 3568722003acSSudarsana Reddy Kalluru static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3569722003acSSudarsana Reddy Kalluru u32 hw_addr, void *p_eth_qzone, 3570722003acSSudarsana Reddy Kalluru size_t eth_qzone_size, u8 timeset) 3571722003acSSudarsana Reddy Kalluru { 3572722003acSSudarsana Reddy Kalluru 
struct coalescing_timeset *p_coal_timeset; 3573722003acSSudarsana Reddy Kalluru 3574722003acSSudarsana Reddy Kalluru if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { 3575722003acSSudarsana Reddy Kalluru DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n"); 3576722003acSSudarsana Reddy Kalluru return -EINVAL; 3577722003acSSudarsana Reddy Kalluru } 3578722003acSSudarsana Reddy Kalluru 3579722003acSSudarsana Reddy Kalluru p_coal_timeset = p_eth_qzone; 3580722003acSSudarsana Reddy Kalluru memset(p_coal_timeset, 0, eth_qzone_size); 3581722003acSSudarsana Reddy Kalluru SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 3582722003acSSudarsana Reddy Kalluru SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 3583722003acSSudarsana Reddy Kalluru qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 3584722003acSSudarsana Reddy Kalluru 3585722003acSSudarsana Reddy Kalluru return 0; 3586722003acSSudarsana Reddy Kalluru } 3587722003acSSudarsana Reddy Kalluru 3588722003acSSudarsana Reddy Kalluru int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3589f870a3c6Ssudarsana.kalluru@cavium.com u16 coalesce, u16 qid, u16 sb_id) 3590722003acSSudarsana Reddy Kalluru { 3591722003acSSudarsana Reddy Kalluru struct ustorm_eth_queue_zone eth_qzone; 3592722003acSSudarsana Reddy Kalluru u8 timeset, timer_res; 3593722003acSSudarsana Reddy Kalluru u16 fw_qid = 0; 3594722003acSSudarsana Reddy Kalluru u32 address; 3595722003acSSudarsana Reddy Kalluru int rc; 3596722003acSSudarsana Reddy Kalluru 3597722003acSSudarsana Reddy Kalluru /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 3598722003acSSudarsana Reddy Kalluru if (coalesce <= 0x7F) { 3599722003acSSudarsana Reddy Kalluru timer_res = 0; 3600722003acSSudarsana Reddy Kalluru } else if (coalesce <= 0xFF) { 3601722003acSSudarsana Reddy Kalluru timer_res = 1; 3602722003acSSudarsana Reddy Kalluru } else if (coalesce <= 0x1FF) { 
3603722003acSSudarsana Reddy Kalluru timer_res = 2; 3604722003acSSudarsana Reddy Kalluru } else { 3605722003acSSudarsana Reddy Kalluru DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 3606722003acSSudarsana Reddy Kalluru return -EINVAL; 3607722003acSSudarsana Reddy Kalluru } 3608722003acSSudarsana Reddy Kalluru timeset = (u8)(coalesce >> timer_res); 3609722003acSSudarsana Reddy Kalluru 3610f870a3c6Ssudarsana.kalluru@cavium.com rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); 3611722003acSSudarsana Reddy Kalluru if (rc) 3612722003acSSudarsana Reddy Kalluru return rc; 3613722003acSSudarsana Reddy Kalluru 3614722003acSSudarsana Reddy Kalluru rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false); 3615722003acSSudarsana Reddy Kalluru if (rc) 3616722003acSSudarsana Reddy Kalluru goto out; 3617722003acSSudarsana Reddy Kalluru 3618722003acSSudarsana Reddy Kalluru address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); 3619722003acSSudarsana Reddy Kalluru 3620722003acSSudarsana Reddy Kalluru rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 3621722003acSSudarsana Reddy Kalluru sizeof(struct ustorm_eth_queue_zone), timeset); 3622722003acSSudarsana Reddy Kalluru if (rc) 3623722003acSSudarsana Reddy Kalluru goto out; 3624722003acSSudarsana Reddy Kalluru 3625722003acSSudarsana Reddy Kalluru p_hwfn->cdev->rx_coalesce_usecs = coalesce; 3626722003acSSudarsana Reddy Kalluru out: 3627722003acSSudarsana Reddy Kalluru return rc; 3628722003acSSudarsana Reddy Kalluru } 3629722003acSSudarsana Reddy Kalluru 3630722003acSSudarsana Reddy Kalluru int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3631f870a3c6Ssudarsana.kalluru@cavium.com u16 coalesce, u16 qid, u16 sb_id) 3632722003acSSudarsana Reddy Kalluru { 3633722003acSSudarsana Reddy Kalluru struct xstorm_eth_queue_zone eth_qzone; 3634722003acSSudarsana Reddy Kalluru u8 timeset, timer_res; 3635722003acSSudarsana Reddy Kalluru u16 fw_qid = 0; 3636722003acSSudarsana Reddy Kalluru 
u32 address; 3637722003acSSudarsana Reddy Kalluru int rc; 3638722003acSSudarsana Reddy Kalluru 3639722003acSSudarsana Reddy Kalluru /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 3640722003acSSudarsana Reddy Kalluru if (coalesce <= 0x7F) { 3641722003acSSudarsana Reddy Kalluru timer_res = 0; 3642722003acSSudarsana Reddy Kalluru } else if (coalesce <= 0xFF) { 3643722003acSSudarsana Reddy Kalluru timer_res = 1; 3644722003acSSudarsana Reddy Kalluru } else if (coalesce <= 0x1FF) { 3645722003acSSudarsana Reddy Kalluru timer_res = 2; 3646722003acSSudarsana Reddy Kalluru } else { 3647722003acSSudarsana Reddy Kalluru DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 3648722003acSSudarsana Reddy Kalluru return -EINVAL; 3649722003acSSudarsana Reddy Kalluru } 3650722003acSSudarsana Reddy Kalluru timeset = (u8)(coalesce >> timer_res); 3651722003acSSudarsana Reddy Kalluru 3652f870a3c6Ssudarsana.kalluru@cavium.com rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); 3653722003acSSudarsana Reddy Kalluru if (rc) 3654722003acSSudarsana Reddy Kalluru return rc; 3655722003acSSudarsana Reddy Kalluru 3656722003acSSudarsana Reddy Kalluru rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true); 3657722003acSSudarsana Reddy Kalluru if (rc) 3658722003acSSudarsana Reddy Kalluru goto out; 3659722003acSSudarsana Reddy Kalluru 3660722003acSSudarsana Reddy Kalluru address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); 3661722003acSSudarsana Reddy Kalluru 3662722003acSSudarsana Reddy Kalluru rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, 3663722003acSSudarsana Reddy Kalluru sizeof(struct xstorm_eth_queue_zone), timeset); 3664722003acSSudarsana Reddy Kalluru if (rc) 3665722003acSSudarsana Reddy Kalluru goto out; 3666722003acSSudarsana Reddy Kalluru 3667722003acSSudarsana Reddy Kalluru p_hwfn->cdev->tx_coalesce_usecs = coalesce; 3668722003acSSudarsana Reddy Kalluru out: 3669722003acSSudarsana Reddy Kalluru return rc; 3670722003acSSudarsana 
Reddy Kalluru } 3671722003acSSudarsana Reddy Kalluru 3672bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them. 3673bcd197c8SManish Chopra * After this configuration each vport will have 3674bcd197c8SManish Chopra * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) 3675bcd197c8SManish Chopra */ 3676bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, 3677bcd197c8SManish Chopra struct qed_ptt *p_ptt, 3678bcd197c8SManish Chopra u32 min_pf_rate) 3679bcd197c8SManish Chopra { 3680bcd197c8SManish Chopra struct init_qm_vport_params *vport_params; 3681bcd197c8SManish Chopra int i; 3682bcd197c8SManish Chopra 3683bcd197c8SManish Chopra vport_params = p_hwfn->qm_info.qm_vport_params; 3684bcd197c8SManish Chopra 3685bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 3686bcd197c8SManish Chopra u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 3687bcd197c8SManish Chopra 3688bcd197c8SManish Chopra vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) / 3689bcd197c8SManish Chopra min_pf_rate; 3690bcd197c8SManish Chopra qed_init_vport_wfq(p_hwfn, p_ptt, 3691bcd197c8SManish Chopra vport_params[i].first_tx_pq_id, 3692bcd197c8SManish Chopra vport_params[i].vport_wfq); 3693bcd197c8SManish Chopra } 3694bcd197c8SManish Chopra } 3695bcd197c8SManish Chopra 3696bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, 3697bcd197c8SManish Chopra u32 min_pf_rate) 3698bcd197c8SManish Chopra 3699bcd197c8SManish Chopra { 3700bcd197c8SManish Chopra int i; 3701bcd197c8SManish Chopra 3702bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) 3703bcd197c8SManish Chopra p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; 3704bcd197c8SManish Chopra } 3705bcd197c8SManish Chopra 3706bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, 3707bcd197c8SManish Chopra struct qed_ptt *p_ptt, 3708bcd197c8SManish Chopra u32 
min_pf_rate) 3709bcd197c8SManish Chopra { 3710bcd197c8SManish Chopra struct init_qm_vport_params *vport_params; 3711bcd197c8SManish Chopra int i; 3712bcd197c8SManish Chopra 3713bcd197c8SManish Chopra vport_params = p_hwfn->qm_info.qm_vport_params; 3714bcd197c8SManish Chopra 3715bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 3716bcd197c8SManish Chopra qed_init_wfq_default_param(p_hwfn, min_pf_rate); 3717bcd197c8SManish Chopra qed_init_vport_wfq(p_hwfn, p_ptt, 3718bcd197c8SManish Chopra vport_params[i].first_tx_pq_id, 3719bcd197c8SManish Chopra vport_params[i].vport_wfq); 3720bcd197c8SManish Chopra } 3721bcd197c8SManish Chopra } 3722bcd197c8SManish Chopra 3723bcd197c8SManish Chopra /* This function performs several validations for WFQ 3724bcd197c8SManish Chopra * configuration and required min rate for a given vport 3725bcd197c8SManish Chopra * 1. req_rate must be greater than one percent of min_pf_rate. 3726bcd197c8SManish Chopra * 2. req_rate should not cause other vports [not configured for WFQ explicitly] 3727bcd197c8SManish Chopra * rates to get less than one percent of min_pf_rate. 3728bcd197c8SManish Chopra * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
3729bcd197c8SManish Chopra */ 3730bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, 37311a635e48SYuval Mintz u16 vport_id, u32 req_rate, u32 min_pf_rate) 3732bcd197c8SManish Chopra { 3733bcd197c8SManish Chopra u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 3734bcd197c8SManish Chopra int non_requested_count = 0, req_count = 0, i, num_vports; 3735bcd197c8SManish Chopra 3736bcd197c8SManish Chopra num_vports = p_hwfn->qm_info.num_vports; 3737bcd197c8SManish Chopra 3738bcd197c8SManish Chopra /* Accounting for the vports which are configured for WFQ explicitly */ 3739bcd197c8SManish Chopra for (i = 0; i < num_vports; i++) { 3740bcd197c8SManish Chopra u32 tmp_speed; 3741bcd197c8SManish Chopra 3742bcd197c8SManish Chopra if ((i != vport_id) && 3743bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[i].configured) { 3744bcd197c8SManish Chopra req_count++; 3745bcd197c8SManish Chopra tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 3746bcd197c8SManish Chopra total_req_min_rate += tmp_speed; 3747bcd197c8SManish Chopra } 3748bcd197c8SManish Chopra } 3749bcd197c8SManish Chopra 3750bcd197c8SManish Chopra /* Include current vport data as well */ 3751bcd197c8SManish Chopra req_count++; 3752bcd197c8SManish Chopra total_req_min_rate += req_rate; 3753bcd197c8SManish Chopra non_requested_count = num_vports - req_count; 3754bcd197c8SManish Chopra 3755bcd197c8SManish Chopra if (req_rate < min_pf_rate / QED_WFQ_UNIT) { 3756bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3757bcd197c8SManish Chopra "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 3758bcd197c8SManish Chopra vport_id, req_rate, min_pf_rate); 3759bcd197c8SManish Chopra return -EINVAL; 3760bcd197c8SManish Chopra } 3761bcd197c8SManish Chopra 3762bcd197c8SManish Chopra if (num_vports > QED_WFQ_UNIT) { 3763bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3764bcd197c8SManish Chopra "Number of vports is greater 
than %d\n", 3765bcd197c8SManish Chopra QED_WFQ_UNIT); 3766bcd197c8SManish Chopra return -EINVAL; 3767bcd197c8SManish Chopra } 3768bcd197c8SManish Chopra 3769bcd197c8SManish Chopra if (total_req_min_rate > min_pf_rate) { 3770bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3771bcd197c8SManish Chopra "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 3772bcd197c8SManish Chopra total_req_min_rate, min_pf_rate); 3773bcd197c8SManish Chopra return -EINVAL; 3774bcd197c8SManish Chopra } 3775bcd197c8SManish Chopra 3776bcd197c8SManish Chopra total_left_rate = min_pf_rate - total_req_min_rate; 3777bcd197c8SManish Chopra 3778bcd197c8SManish Chopra left_rate_per_vp = total_left_rate / non_requested_count; 3779bcd197c8SManish Chopra if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { 3780bcd197c8SManish Chopra DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3781bcd197c8SManish Chopra "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 3782bcd197c8SManish Chopra left_rate_per_vp, min_pf_rate); 3783bcd197c8SManish Chopra return -EINVAL; 3784bcd197c8SManish Chopra } 3785bcd197c8SManish Chopra 3786bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 3787bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[vport_id].configured = true; 3788bcd197c8SManish Chopra 3789bcd197c8SManish Chopra for (i = 0; i < num_vports; i++) { 3790bcd197c8SManish Chopra if (p_hwfn->qm_info.wfq_data[i].configured) 3791bcd197c8SManish Chopra continue; 3792bcd197c8SManish Chopra 3793bcd197c8SManish Chopra p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 3794bcd197c8SManish Chopra } 3795bcd197c8SManish Chopra 3796bcd197c8SManish Chopra return 0; 3797bcd197c8SManish Chopra } 3798bcd197c8SManish Chopra 3799733def6aSYuval Mintz static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, 3800733def6aSYuval Mintz struct qed_ptt *p_ptt, u16 vp_id, u32 rate) 3801733def6aSYuval Mintz 
{ 3802733def6aSYuval Mintz struct qed_mcp_link_state *p_link; 3803733def6aSYuval Mintz int rc = 0; 3804733def6aSYuval Mintz 3805733def6aSYuval Mintz p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; 3806733def6aSYuval Mintz 3807733def6aSYuval Mintz if (!p_link->min_pf_rate) { 3808733def6aSYuval Mintz p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 3809733def6aSYuval Mintz p_hwfn->qm_info.wfq_data[vp_id].configured = true; 3810733def6aSYuval Mintz return rc; 3811733def6aSYuval Mintz } 3812733def6aSYuval Mintz 3813733def6aSYuval Mintz rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 3814733def6aSYuval Mintz 38151a635e48SYuval Mintz if (!rc) 3816733def6aSYuval Mintz qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, 3817733def6aSYuval Mintz p_link->min_pf_rate); 3818733def6aSYuval Mintz else 3819733def6aSYuval Mintz DP_NOTICE(p_hwfn, 3820733def6aSYuval Mintz "Validation failed while configuring min rate\n"); 3821733def6aSYuval Mintz 3822733def6aSYuval Mintz return rc; 3823733def6aSYuval Mintz } 3824733def6aSYuval Mintz 3825bcd197c8SManish Chopra static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, 3826bcd197c8SManish Chopra struct qed_ptt *p_ptt, 3827bcd197c8SManish Chopra u32 min_pf_rate) 3828bcd197c8SManish Chopra { 3829bcd197c8SManish Chopra bool use_wfq = false; 3830bcd197c8SManish Chopra int rc = 0; 3831bcd197c8SManish Chopra u16 i; 3832bcd197c8SManish Chopra 3833bcd197c8SManish Chopra /* Validate all pre configured vports for wfq */ 3834bcd197c8SManish Chopra for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 3835bcd197c8SManish Chopra u32 rate; 3836bcd197c8SManish Chopra 3837bcd197c8SManish Chopra if (!p_hwfn->qm_info.wfq_data[i].configured) 3838bcd197c8SManish Chopra continue; 3839bcd197c8SManish Chopra 3840bcd197c8SManish Chopra rate = p_hwfn->qm_info.wfq_data[i].min_speed; 3841bcd197c8SManish Chopra use_wfq = true; 3842bcd197c8SManish Chopra 3843bcd197c8SManish Chopra rc = qed_init_wfq_param(p_hwfn, i, rate, 
min_pf_rate); 3844bcd197c8SManish Chopra if (rc) { 3845bcd197c8SManish Chopra DP_NOTICE(p_hwfn, 3846bcd197c8SManish Chopra "WFQ validation failed while configuring min rate\n"); 3847bcd197c8SManish Chopra break; 3848bcd197c8SManish Chopra } 3849bcd197c8SManish Chopra } 3850bcd197c8SManish Chopra 3851bcd197c8SManish Chopra if (!rc && use_wfq) 3852bcd197c8SManish Chopra qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 3853bcd197c8SManish Chopra else 3854bcd197c8SManish Chopra qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 3855bcd197c8SManish Chopra 3856bcd197c8SManish Chopra return rc; 3857bcd197c8SManish Chopra } 3858bcd197c8SManish Chopra 3859733def6aSYuval Mintz /* Main API for qed clients to configure vport min rate. 3860733def6aSYuval Mintz * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 3861733def6aSYuval Mintz * rate - Speed in Mbps needs to be assigned to a given vport. 3862733def6aSYuval Mintz */ 3863733def6aSYuval Mintz int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) 3864733def6aSYuval Mintz { 3865733def6aSYuval Mintz int i, rc = -EINVAL; 3866733def6aSYuval Mintz 3867733def6aSYuval Mintz /* Currently not supported; Might change in future */ 3868733def6aSYuval Mintz if (cdev->num_hwfns > 1) { 3869733def6aSYuval Mintz DP_NOTICE(cdev, 3870733def6aSYuval Mintz "WFQ configuration is not supported for this device\n"); 3871733def6aSYuval Mintz return rc; 3872733def6aSYuval Mintz } 3873733def6aSYuval Mintz 3874733def6aSYuval Mintz for_each_hwfn(cdev, i) { 3875733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3876733def6aSYuval Mintz struct qed_ptt *p_ptt; 3877733def6aSYuval Mintz 3878733def6aSYuval Mintz p_ptt = qed_ptt_acquire(p_hwfn); 3879733def6aSYuval Mintz if (!p_ptt) 3880733def6aSYuval Mintz return -EBUSY; 3881733def6aSYuval Mintz 3882733def6aSYuval Mintz rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 3883733def6aSYuval Mintz 3884d572c430SYuval Mintz if (rc) { 
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt, u32 min_pf_rate)
{
	int i;

	/* WFQ configuration is only supported on single-engine devices;
	 * bail out on CMT (multi-hwfn) devices.
	 */
	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	/* Re-derive the per-vport WFQ configuration of every hwfn using the
	 * new minimal PF rate.
	 */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
						      min_pf_rate);
	}
}

/* Configure the PF Tx rate-limiter of a single hwfn according to max_bw
 * [percentage of line speed]. Caller must hold an acquired ptt.
 * Returns 0 on success, negative errno otherwise.
 */
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	/* Without a known line speed the rate can't be computed; only
	 * proceed in that case when the limit is being removed (100%).
	 */
	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		/* Link state is tracked by the leading hwfn */
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		/* Stop at the first hwfn that failed configuration */
		if (rc)
			break;
	}

	return rc;
}

/* Configure the PF WFQ weight of a single hwfn according to min_bw
 * [percentage of line speed]. Caller must hold an acquired ptt.
 * Returns 0 on success, negative errno otherwise.
 */
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	/* Without a known line speed there is nothing to configure in HW */
	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		/* Link state is tracked by the leading hwfn */
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		/* If a minimal PF rate is now in effect, re-derive the
		 * per-vport WFQ configuration accordingly.
		 */
		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* Tear down WFQ configuration: disable vport WFQ in HW if a minimal PF
 * rate was configured, then zero the per-vport WFQ book-keeping.
 */
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}

/* BB devices have two engines; other devices have one */
int qed_device_num_engines(struct qed_dev *cdev)
{
	return QED_IS_BB(cdev) ? 2 : 1;
}

/* Number of physical ports exposed by the device */
static int qed_device_num_ports(struct qed_dev *cdev)
{
	/* in CMT always only one port */
	if (cdev->num_hwfns > 1)
		return 1;

	return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
}

/* Map the leading hwfn's absolute PF id onto a port index */
int qed_device_get_port_id(struct qed_dev *cdev)
{
	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
}