1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <asm/byteorder.h> 35 #include <linux/io.h> 36 #include <linux/delay.h> 37 #include <linux/dma-mapping.h> 38 #include <linux/errno.h> 39 #include <linux/kernel.h> 40 #include <linux/mutex.h> 41 #include <linux/pci.h> 42 #include <linux/slab.h> 43 #include <linux/string.h> 44 #include <linux/vmalloc.h> 45 #include <linux/etherdevice.h> 46 #include <linux/qed/qed_chain.h> 47 #include <linux/qed/qed_if.h> 48 #include "qed.h" 49 #include "qed_cxt.h" 50 #include "qed_dcbx.h" 51 #include "qed_dev_api.h" 52 #include "qed_fcoe.h" 53 #include "qed_hsi.h" 54 #include "qed_hw.h" 55 #include "qed_init_ops.h" 56 #include "qed_int.h" 57 #include "qed_iscsi.h" 58 #include "qed_ll2.h" 59 #include "qed_mcp.h" 60 #include "qed_ooo.h" 61 #include "qed_reg_addr.h" 62 #include "qed_sp.h" 63 #include "qed_sriov.h" 64 #include "qed_vf.h" 65 #include "qed_roce.h" 66 67 static DEFINE_SPINLOCK(qm_lock); 68 69 #define QED_MIN_DPIS (4) 70 #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) 71 72 /* API common to all protocols */ 73 enum BAR_ID { 74 BAR_ID_0, /* used for GRC */ 75 BAR_ID_1 /* Used for doorbells */ 76 }; 77 78 static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, 79 struct qed_ptt *p_ptt, enum BAR_ID bar_id) 80 { 81 u32 bar_reg = (bar_id == BAR_ID_0 ? 82 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 83 u32 val; 84 85 if (IS_VF(p_hwfn->cdev)) 86 return 1 << 17; 87 88 val = qed_rd(p_hwfn, p_ptt, bar_reg); 89 if (val) 90 return 1 << (val + 15); 91 92 /* Old MFW initialized the above registers only conditionally */ 93 if (p_hwfn->cdev->num_hwfns > 1) { 94 DP_INFO(p_hwfn, 95 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 96 return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024; 97 } else { 98 DP_INFO(p_hwfn, 99 "BAR size not configured. 
Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 100 return 512 * 1024; 101 } 102 } 103 104 void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) 105 { 106 u32 i; 107 108 cdev->dp_level = dp_level; 109 cdev->dp_module = dp_module; 110 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 111 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 112 113 p_hwfn->dp_level = dp_level; 114 p_hwfn->dp_module = dp_module; 115 } 116 } 117 118 void qed_init_struct(struct qed_dev *cdev) 119 { 120 u8 i; 121 122 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { 123 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 124 125 p_hwfn->cdev = cdev; 126 p_hwfn->my_id = i; 127 p_hwfn->b_active = false; 128 129 mutex_init(&p_hwfn->dmae_info.mutex); 130 } 131 132 /* hwfn 0 is always active */ 133 cdev->hwfns[0].b_active = true; 134 135 /* set the default cache alignment to 128 */ 136 cdev->cache_shift = 7; 137 } 138 139 static void qed_qm_info_free(struct qed_hwfn *p_hwfn) 140 { 141 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 142 143 kfree(qm_info->qm_pq_params); 144 qm_info->qm_pq_params = NULL; 145 kfree(qm_info->qm_vport_params); 146 qm_info->qm_vport_params = NULL; 147 kfree(qm_info->qm_port_params); 148 qm_info->qm_port_params = NULL; 149 kfree(qm_info->wfq_data); 150 qm_info->wfq_data = NULL; 151 } 152 153 void qed_resc_free(struct qed_dev *cdev) 154 { 155 int i; 156 157 if (IS_VF(cdev)) 158 return; 159 160 kfree(cdev->fw_data); 161 cdev->fw_data = NULL; 162 163 kfree(cdev->reset_stats); 164 165 for_each_hwfn(cdev, i) { 166 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 167 168 qed_cxt_mngr_free(p_hwfn); 169 qed_qm_info_free(p_hwfn); 170 qed_spq_free(p_hwfn); 171 qed_eq_free(p_hwfn, p_hwfn->p_eq); 172 qed_consq_free(p_hwfn, p_hwfn->p_consq); 173 qed_int_free(p_hwfn); 174 #ifdef CONFIG_QED_LL2 175 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); 176 #endif 177 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 178 qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info); 179 180 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 181 qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); 182 qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); 183 } 184 qed_iov_free(p_hwfn); 185 qed_dmae_info_free(p_hwfn); 186 qed_dcbx_info_free(p_hwfn); 187 } 188 } 189 190 /******************** QM initialization *******************/ 191 #define ACTIVE_TCS_BMAP 0x9f 192 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf 193 194 /* determines the physical queue flags for a given PF. */ 195 static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) 196 { 197 u32 flags; 198 199 /* common flags */ 200 flags = PQ_FLAGS_LB; 201 202 /* feature flags */ 203 if (IS_QED_SRIOV(p_hwfn->cdev)) 204 flags |= PQ_FLAGS_VFS; 205 206 /* protocol flags */ 207 switch (p_hwfn->hw_info.personality) { 208 case QED_PCI_ETH: 209 flags |= PQ_FLAGS_MCOS; 210 break; 211 case QED_PCI_FCOE: 212 flags |= PQ_FLAGS_OFLD; 213 break; 214 case QED_PCI_ISCSI: 215 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; 216 break; 217 case QED_PCI_ETH_ROCE: 218 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; 219 break; 220 default: 221 DP_ERR(p_hwfn, 222 "unknown personality %d\n", p_hwfn->hw_info.personality); 223 return 0; 224 } 225 226 return flags; 227 } 228 229 /* Getters for resource amounts necessary for qm initialization */ 230 u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) 231 { 232 return p_hwfn->hw_info.num_hw_tc; 233 } 234 235 u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) 236 { 237 return IS_QED_SRIOV(p_hwfn->cdev) ? 
238 p_hwfn->cdev->p_iov_info->total_vfs : 0; 239 } 240 241 #define NUM_DEFAULT_RLS 1 242 243 u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) 244 { 245 u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 246 247 /* num RLs can't exceed resource amount of rls or vports */ 248 num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), 249 RESC_NUM(p_hwfn, QED_VPORT)); 250 251 /* Make sure after we reserve there's something left */ 252 if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) 253 return 0; 254 255 /* subtract rls necessary for VFs and one default one for the PF */ 256 num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; 257 258 return num_pf_rls; 259 } 260 261 u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) 262 { 263 u32 pq_flags = qed_get_pq_flags(p_hwfn); 264 265 /* all pqs share the same vport, except for vfs and pf_rl pqs */ 266 return (!!(PQ_FLAGS_RLS & pq_flags)) * 267 qed_init_qm_get_num_pf_rls(p_hwfn) + 268 (!!(PQ_FLAGS_VFS & pq_flags)) * 269 qed_init_qm_get_num_vfs(p_hwfn) + 1; 270 } 271 272 /* calc amount of PQs according to the requested flags */ 273 u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) 274 { 275 u32 pq_flags = qed_get_pq_flags(p_hwfn); 276 277 return (!!(PQ_FLAGS_RLS & pq_flags)) * 278 qed_init_qm_get_num_pf_rls(p_hwfn) + 279 (!!(PQ_FLAGS_MCOS & pq_flags)) * 280 qed_init_qm_get_num_tcs(p_hwfn) + 281 (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + 282 (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) + 283 (!!(PQ_FLAGS_LLT & pq_flags)) + 284 (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); 285 } 286 287 /* initialize the top level QM params */ 288 static void qed_init_qm_params(struct qed_hwfn *p_hwfn) 289 { 290 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 291 bool four_port; 292 293 /* pq and vport bases for this PF */ 294 qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); 295 qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); 296 297 /* rate limiting and weighted fair queueing are always enabled */ 298 qm_info->vport_rl_en = 1; 299 qm_info->vport_wfq_en = 1; 300 301 /* TC config is different for AH 4 port */ 302 four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2; 303 304 /* in AH 4 port we have fewer TCs per port */ 305 qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : 306 NUM_OF_PHYS_TCS; 307 308 /* unless MFW indicated otherwise, ooo_tc == 3 for 309 * AH 4-port and 4 otherwise. 310 */ 311 if (!qm_info->ooo_tc) 312 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : 313 DCBX_TCP_OOO_TC; 314 } 315 316 /* initialize qm vport params */ 317 static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) 318 { 319 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 320 u8 i; 321 322 /* all vports participate in weighted fair queueing */ 323 for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) 324 qm_info->qm_vport_params[i].vport_wfq = 1; 325 } 326 327 /* initialize qm port params */ 328 static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) 329 { 330 /* Initialize qm port parameters */ 331 u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines; 332 333 /* indicate how ooo and high pri traffic is dealt with */ 334 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 
335 ACTIVE_TCS_BMAP_4PORT_K2 : 336 ACTIVE_TCS_BMAP; 337 338 for (i = 0; i < num_ports; i++) { 339 struct init_qm_port_params *p_qm_port = 340 &p_hwfn->qm_info.qm_port_params[i]; 341 342 p_qm_port->active = 1; 343 p_qm_port->active_phys_tcs = active_phys_tcs; 344 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; 345 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; 346 } 347 } 348 349 /* Reset the params which must be reset for qm init. QM init may be called as 350 * a result of flows other than driver load (e.g. dcbx renegotiation). Other 351 * params may be affected by the init but would simply recalculate to the same 352 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not 353 * affected as these amounts stay the same. 354 */ 355 static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) 356 { 357 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 358 359 qm_info->num_pqs = 0; 360 qm_info->num_vports = 0; 361 qm_info->num_pf_rls = 0; 362 qm_info->num_vf_pqs = 0; 363 qm_info->first_vf_pq = 0; 364 qm_info->first_mcos_pq = 0; 365 qm_info->first_rl_pq = 0; 366 } 367 368 static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) 369 { 370 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 371 372 qm_info->num_vports++; 373 374 if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) 375 DP_ERR(p_hwfn, 376 "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", 377 qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); 378 } 379 380 /* initialize a single pq and manage qm_info resources accounting. 381 * The pq_init_flags param determines whether the PQ is rate limited 382 * (for VF or PF) and whether a new vport is allocated to the pq or not 383 * (i.e. vport will be shared). 384 */ 385 386 /* flags for pq init */ 387 #define PQ_INIT_SHARE_VPORT (1 << 0) 388 #define PQ_INIT_PF_RL (1 << 1) 389 #define PQ_INIT_VF_RL (1 << 2) 390 391 /* defines for pq init */ 392 #define PQ_INIT_DEFAULT_WRR_GROUP 1 393 #define PQ_INIT_DEFAULT_TC 0 394 #define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) 395 396 static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, 397 struct qed_qm_info *qm_info, 398 u8 tc, u32 pq_init_flags) 399 { 400 u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn); 401 402 if (pq_idx > max_pq) 403 DP_ERR(p_hwfn, 404 "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); 405 406 /* init pq params */ 407 qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + 408 qm_info->num_vports; 409 qm_info->qm_pq_params[pq_idx].tc_id = tc; 410 qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; 411 qm_info->qm_pq_params[pq_idx].rl_valid = 412 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); 413 414 /* qm params accounting */ 415 qm_info->num_pqs++; 416 if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) 417 qm_info->num_vports++; 418 419 if (pq_init_flags & PQ_INIT_PF_RL) 420 qm_info->num_pf_rls++; 421 422 if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) 423 DP_ERR(p_hwfn, 424 "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", 425 qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); 426 427 if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn)) 428 DP_ERR(p_hwfn, 429 "rl overflow! 
qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", 430 qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn)); 431 } 432 433 /* get pq index according to PQ_FLAGS */ 434 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, 435 u32 pq_flags) 436 { 437 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 438 439 /* Can't have multiple flags set here */ 440 if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) 441 goto err; 442 443 switch (pq_flags) { 444 case PQ_FLAGS_RLS: 445 return &qm_info->first_rl_pq; 446 case PQ_FLAGS_MCOS: 447 return &qm_info->first_mcos_pq; 448 case PQ_FLAGS_LB: 449 return &qm_info->pure_lb_pq; 450 case PQ_FLAGS_OOO: 451 return &qm_info->ooo_pq; 452 case PQ_FLAGS_ACK: 453 return &qm_info->pure_ack_pq; 454 case PQ_FLAGS_OFLD: 455 return &qm_info->offload_pq; 456 case PQ_FLAGS_LLT: 457 return &qm_info->low_latency_pq; 458 case PQ_FLAGS_VFS: 459 return &qm_info->first_vf_pq; 460 default: 461 goto err; 462 } 463 464 err: 465 DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); 466 return NULL; 467 } 468 469 /* save pq index in qm info */ 470 static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn, 471 u32 pq_flags, u16 pq_val) 472 { 473 u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 474 475 *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; 476 } 477 478 /* get tx pq index, with the PQ TX base already set (ready for context init) */ 479 u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags) 480 { 481 u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); 482 483 return *base_pq_idx + CM_TX_PQ_BASE; 484 } 485 486 u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) 487 { 488 u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); 489 490 if (tc > max_tc) 491 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); 492 493 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; 494 } 495 496 u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) 497 { 498 u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); 499 500 if (vf > max_vf) 501 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); 502 503 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; 504 } 505 506 u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl) 507 { 508 u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn); 509 510 if (rl > max_rl) 511 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); 512 513 return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; 514 } 515 516 /* Functions for creating specific types of pqs */ 517 static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) 518 { 519 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 520 521 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) 522 return; 523 524 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); 525 qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); 526 } 527 528 static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn) 529 { 530 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 531 532 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) 533 return; 534 535 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); 536 qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); 537 } 538 539 static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) 540 { 541 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 542 543 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) 544 return; 545 546 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); 547 qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 548 } 549 550 static void 
qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) 551 { 552 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 553 554 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) 555 return; 556 557 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); 558 qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 559 } 560 561 static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) 562 { 563 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 564 565 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) 566 return; 567 568 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); 569 qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); 570 } 571 572 static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) 573 { 574 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 575 u8 tc_idx; 576 577 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) 578 return; 579 580 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); 581 for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++) 582 qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); 583 } 584 585 static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn) 586 { 587 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 588 u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); 589 590 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) 591 return; 592 593 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); 594 qm_info->num_vf_pqs = num_vfs; 595 for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) 596 qed_init_qm_pq(p_hwfn, 597 qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); 598 } 599 600 static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn) 601 { 602 u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn); 603 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 604 605 if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) 606 return; 607 608 qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); 609 for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) 610 qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); 611 } 612 613 static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) 614 { 615 /* rate limited pqs, must come first (FW assumption) */ 616 qed_init_qm_rl_pqs(p_hwfn); 617 618 /* pqs for multi cos */ 619 qed_init_qm_mcos_pqs(p_hwfn); 620 621 /* pure loopback pq */ 622 qed_init_qm_lb_pq(p_hwfn); 623 624 /* out of order pq */ 625 qed_init_qm_ooo_pq(p_hwfn); 626 627 /* pure ack pq */ 628 qed_init_qm_pure_ack_pq(p_hwfn); 629 630 /* pq for offloaded protocol */ 631 qed_init_qm_offload_pq(p_hwfn); 632 633 /* low latency pq */ 634 qed_init_qm_low_latency_pq(p_hwfn); 635 636 /* done sharing vports */ 637 qed_init_qm_advance_vport(p_hwfn); 638 639 /* pqs for vfs */ 640 qed_init_qm_vf_pqs(p_hwfn); 641 } 642 643 /* compare values of getters against resources amounts */ 644 static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) 645 { 646 if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) { 647 DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); 648 return -EINVAL; 649 } 650 651 if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) { 652 DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); 653 return -EINVAL; 654 } 655 656 return 0; 657 } 658 659 static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) 660 { 661 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 662 struct init_qm_vport_params *vport; 663 struct init_qm_port_params *port; 664 struct init_qm_pq_params *pq; 665 int i, tc; 666 667 /* top level params */ 668 DP_VERBOSE(p_hwfn, 669 
NETIF_MSG_HW, 670 "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", 671 qm_info->start_pq, 672 qm_info->start_vport, 673 qm_info->pure_lb_pq, 674 qm_info->offload_pq, qm_info->pure_ack_pq); 675 DP_VERBOSE(p_hwfn, 676 NETIF_MSG_HW, 677 "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", 678 qm_info->ooo_pq, 679 qm_info->first_vf_pq, 680 qm_info->num_pqs, 681 qm_info->num_vf_pqs, 682 qm_info->num_vports, qm_info->max_phys_tcs_per_port); 683 DP_VERBOSE(p_hwfn, 684 NETIF_MSG_HW, 685 "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", 686 qm_info->pf_rl_en, 687 qm_info->pf_wfq_en, 688 qm_info->vport_rl_en, 689 qm_info->vport_wfq_en, 690 qm_info->pf_wfq, 691 qm_info->pf_rl, 692 qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); 693 694 /* port table */ 695 for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) { 696 port = &(qm_info->qm_port_params[i]); 697 DP_VERBOSE(p_hwfn, 698 NETIF_MSG_HW, 699 "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", 700 i, 701 port->active, 702 port->active_phys_tcs, 703 port->num_pbf_cmd_lines, 704 port->num_btb_blocks, port->reserved); 705 } 706 707 /* vport table */ 708 for (i = 0; i < qm_info->num_vports; i++) { 709 vport = &(qm_info->qm_vport_params[i]); 710 DP_VERBOSE(p_hwfn, 711 NETIF_MSG_HW, 712 "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", 713 qm_info->start_vport + i, 714 vport->vport_rl, vport->vport_wfq); 715 for (tc = 0; tc < NUM_OF_TCS; tc++) 716 DP_VERBOSE(p_hwfn, 717 NETIF_MSG_HW, 718 "%d ", vport->first_tx_pq_id[tc]); 719 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); 720 } 721 722 /* pq table */ 723 for (i = 0; i < qm_info->num_pqs; i++) { 724 pq = &(qm_info->qm_pq_params[i]); 725 DP_VERBOSE(p_hwfn, 726 NETIF_MSG_HW, 727 "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", 728 qm_info->start_pq + i, 729 pq->vport_id, 730 pq->tc_id, pq->wrr_group, pq->rl_valid); 731 } 732 } 733 734 static void qed_init_qm_info(struct qed_hwfn *p_hwfn) 735 { 736 /* reset params required for init run */ 737 qed_init_qm_reset_params(p_hwfn); 738 739 /* init QM top level params */ 740 qed_init_qm_params(p_hwfn); 741 742 /* init QM port params */ 743 qed_init_qm_port_params(p_hwfn); 744 745 /* init QM vport params */ 746 qed_init_qm_vport_params(p_hwfn); 747 748 /* init QM physical queue params */ 749 qed_init_qm_pq_params(p_hwfn); 750 751 /* display all that init */ 752 qed_dp_init_qm_params(p_hwfn); 753 } 754 755 /* This function reconfigures the QM pf on the fly. 756 * For this purpose we: 757 * 1. reconfigure the QM database 758 * 2. set new values to runtime arrat 759 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM 760 * 4. activate init tool in QM_PF stage 761 * 5. 
send an sdm_qm_cmd through rbc interface to release the QM 762 */ 763 int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 764 { 765 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 766 bool b_rc; 767 int rc; 768 769 /* initialize qed's qm data structure */ 770 qed_init_qm_info(p_hwfn); 771 772 /* stop PF's qm queues */ 773 spin_lock_bh(&qm_lock); 774 b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 775 qm_info->start_pq, qm_info->num_pqs); 776 spin_unlock_bh(&qm_lock); 777 if (!b_rc) 778 return -EINVAL; 779 780 /* clear the QM_PF runtime phase leftovers from previous init */ 781 qed_init_clear_rt_data(p_hwfn); 782 783 /* prepare QM portion of runtime array */ 784 qed_qm_init_pf(p_hwfn, p_ptt); 785 786 /* activate init tool on runtime array */ 787 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, 788 p_hwfn->hw_info.hw_mode); 789 if (rc) 790 return rc; 791 792 /* start PF's qm queues */ 793 spin_lock_bh(&qm_lock); 794 b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 795 qm_info->start_pq, qm_info->num_pqs); 796 spin_unlock_bh(&qm_lock); 797 if (!b_rc) 798 return -EINVAL; 799 800 return 0; 801 } 802 803 static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) 804 { 805 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 806 int rc; 807 808 rc = qed_init_qm_sanity(p_hwfn); 809 if (rc) 810 goto alloc_err; 811 812 qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * 813 qed_init_qm_get_num_pqs(p_hwfn), 814 GFP_KERNEL); 815 if (!qm_info->qm_pq_params) 816 goto alloc_err; 817 818 qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * 819 qed_init_qm_get_num_vports(p_hwfn), 820 GFP_KERNEL); 821 if (!qm_info->qm_vport_params) 822 goto alloc_err; 823 824 qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * 825 p_hwfn->cdev->num_ports_in_engines, 826 GFP_KERNEL); 827 if (!qm_info->qm_port_params) 828 goto alloc_err; 829 830 qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) * 831 qed_init_qm_get_num_vports(p_hwfn), 832 GFP_KERNEL); 833 if (!qm_info->wfq_data) 834 goto alloc_err; 835 836 return 0; 837 838 alloc_err: 839 DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); 840 qed_qm_info_free(p_hwfn); 841 return -ENOMEM; 842 } 843 844 int qed_resc_alloc(struct qed_dev *cdev) 845 { 846 struct qed_iscsi_info *p_iscsi_info; 847 struct qed_fcoe_info *p_fcoe_info; 848 struct qed_ooo_info *p_ooo_info; 849 #ifdef CONFIG_QED_LL2 850 struct qed_ll2_info *p_ll2_info; 851 #endif 852 u32 rdma_tasks, excess_tasks; 853 struct qed_consq *p_consq; 854 struct qed_eq *p_eq; 855 u32 line_count; 856 int i, rc = 0; 857 858 if (IS_VF(cdev)) 859 return rc; 860 861 cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); 862 if (!cdev->fw_data) 863 return -ENOMEM; 864 865 for_each_hwfn(cdev, i) { 866 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 867 u32 n_eqes, num_cons; 868 869 /* First allocate the context manager structure */ 870 rc = qed_cxt_mngr_alloc(p_hwfn); 871 if (rc) 872 goto alloc_err; 873 874 /* Set the HW cid/tid numbers (in the contest manager) 875 * Must be done prior to any further computations. 
876 */ 877 rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); 878 if (rc) 879 goto alloc_err; 880 881 rc = qed_alloc_qm_data(p_hwfn); 882 if (rc) 883 goto alloc_err; 884 885 /* init qm info */ 886 qed_init_qm_info(p_hwfn); 887 888 /* Compute the ILT client partition */ 889 rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 890 if (rc) { 891 DP_NOTICE(p_hwfn, 892 "too many ILT lines; re-computing with less lines\n"); 893 /* In case there are not enough ILT lines we reduce the 894 * number of RDMA tasks and re-compute. 895 */ 896 excess_tasks = 897 qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count); 898 if (!excess_tasks) 899 goto alloc_err; 900 901 rdma_tasks = RDMA_MAX_TIDS - excess_tasks; 902 rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks); 903 if (rc) 904 goto alloc_err; 905 906 rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); 907 if (rc) { 908 DP_ERR(p_hwfn, 909 "failed ILT compute. Requested too many lines: %u\n", 910 line_count); 911 912 goto alloc_err; 913 } 914 } 915 916 /* CID map / ILT shadow table / T2 917 * The talbes sizes are determined by the computations above 918 */ 919 rc = qed_cxt_tables_alloc(p_hwfn); 920 if (rc) 921 goto alloc_err; 922 923 /* SPQ, must follow ILT because initializes SPQ context */ 924 rc = qed_spq_alloc(p_hwfn); 925 if (rc) 926 goto alloc_err; 927 928 /* SP status block allocation */ 929 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, 930 RESERVED_PTT_DPC); 931 932 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); 933 if (rc) 934 goto alloc_err; 935 936 rc = qed_iov_alloc(p_hwfn); 937 if (rc) 938 goto alloc_err; 939 940 /* EQ */ 941 n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); 942 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 943 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, 944 PROTOCOLID_ROCE, 945 NULL) * 2; 946 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 947 } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 948 num_cons = 949 qed_cxt_get_proto_cid_count(p_hwfn, 950 PROTOCOLID_ISCSI, 951 NULL); 952 n_eqes += 2 * num_cons; 953 } 954 955 if (n_eqes > 0xFFFF) { 956 DP_ERR(p_hwfn, 957 "Cannot allocate 0x%x EQ elements. 
The maximum of a u16 chain is 0x%x\n", 958 n_eqes, 0xFFFF); 959 rc = -EINVAL; 960 goto alloc_err; 961 } 962 963 p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes); 964 if (!p_eq) 965 goto alloc_no_mem; 966 p_hwfn->p_eq = p_eq; 967 968 p_consq = qed_consq_alloc(p_hwfn); 969 if (!p_consq) 970 goto alloc_no_mem; 971 p_hwfn->p_consq = p_consq; 972 973 #ifdef CONFIG_QED_LL2 974 if (p_hwfn->using_ll2) { 975 p_ll2_info = qed_ll2_alloc(p_hwfn); 976 if (!p_ll2_info) 977 goto alloc_no_mem; 978 p_hwfn->p_ll2_info = p_ll2_info; 979 } 980 #endif 981 982 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 983 p_fcoe_info = qed_fcoe_alloc(p_hwfn); 984 if (!p_fcoe_info) 985 goto alloc_no_mem; 986 p_hwfn->p_fcoe_info = p_fcoe_info; 987 } 988 989 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 990 p_iscsi_info = qed_iscsi_alloc(p_hwfn); 991 if (!p_iscsi_info) 992 goto alloc_no_mem; 993 p_hwfn->p_iscsi_info = p_iscsi_info; 994 p_ooo_info = qed_ooo_alloc(p_hwfn); 995 if (!p_ooo_info) 996 goto alloc_no_mem; 997 p_hwfn->p_ooo_info = p_ooo_info; 998 } 999 1000 /* DMA info initialization */ 1001 rc = qed_dmae_info_alloc(p_hwfn); 1002 if (rc) 1003 goto alloc_err; 1004 1005 /* DCBX initialization */ 1006 rc = qed_dcbx_info_alloc(p_hwfn); 1007 if (rc) 1008 goto alloc_err; 1009 } 1010 1011 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); 1012 if (!cdev->reset_stats) 1013 goto alloc_no_mem; 1014 1015 return 0; 1016 1017 alloc_no_mem: 1018 rc = -ENOMEM; 1019 alloc_err: 1020 qed_resc_free(cdev); 1021 return rc; 1022 } 1023 1024 void qed_resc_setup(struct qed_dev *cdev) 1025 { 1026 int i; 1027 1028 if (IS_VF(cdev)) 1029 return; 1030 1031 for_each_hwfn(cdev, i) { 1032 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1033 1034 qed_cxt_mngr_setup(p_hwfn); 1035 qed_spq_setup(p_hwfn); 1036 qed_eq_setup(p_hwfn, p_hwfn->p_eq); 1037 qed_consq_setup(p_hwfn, p_hwfn->p_consq); 1038 1039 /* Read shadow of current MFW mailbox */ 1040 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); 1041 memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 1042 p_hwfn->mcp_info->mfw_mb_cur, 1043 p_hwfn->mcp_info->mfw_mb_length); 1044 1045 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); 1046 1047 qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 1048 #ifdef CONFIG_QED_LL2 1049 if (p_hwfn->using_ll2) 1050 qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); 1051 #endif 1052 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 1053 qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info); 1054 1055 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 1056 qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); 1057 qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); 1058 } 1059 } 1060 } 1061 1062 #define FINAL_CLEANUP_POLL_CNT (100) 1063 #define FINAL_CLEANUP_POLL_TIME (10) 1064 int qed_final_cleanup(struct qed_hwfn *p_hwfn, 1065 struct qed_ptt *p_ptt, u16 id, bool is_vf) 1066 { 1067 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; 1068 int rc = -EBUSY; 1069 1070 addr = GTT_BAR0_MAP_REG_USDM_RAM + 1071 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); 1072 1073 if (is_vf) 1074 id += 0x10; 1075 1076 command |= X_FINAL_CLEANUP_AGG_INT << 1077 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; 1078 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; 1079 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; 1080 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; 1081 1082 /* Make sure notification is not set before initiating final cleanup */ 1083 if (REG_RD(p_hwfn, addr)) { 1084 DP_NOTICE(p_hwfn, 1085 "Unexpected; Found final cleanup notification before initiating final cleanup\n"); 1086 
REG_WR(p_hwfn, addr, 0); 1087 } 1088 1089 DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1090 "Sending final cleanup for PFVF[%d] [Command %08x]\n", 1091 id, command); 1092 1093 qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); 1094 1095 /* Poll until completion */ 1096 while (!REG_RD(p_hwfn, addr) && count--) 1097 msleep(FINAL_CLEANUP_POLL_TIME); 1098 1099 if (REG_RD(p_hwfn, addr)) 1100 rc = 0; 1101 else 1102 DP_NOTICE(p_hwfn, 1103 "Failed to receive FW final cleanup notification\n"); 1104 1105 /* Cleanup afterwards */ 1106 REG_WR(p_hwfn, addr, 0); 1107 1108 return rc; 1109 } 1110 1111 static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) 1112 { 1113 int hw_mode = 0; 1114 1115 if (QED_IS_BB_B0(p_hwfn->cdev)) { 1116 hw_mode |= 1 << MODE_BB; 1117 } else if (QED_IS_AH(p_hwfn->cdev)) { 1118 hw_mode |= 1 << MODE_K2; 1119 } else { 1120 DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", 1121 p_hwfn->cdev->type); 1122 return -EINVAL; 1123 } 1124 1125 switch (p_hwfn->cdev->num_ports_in_engines) { 1126 case 1: 1127 hw_mode |= 1 << MODE_PORTS_PER_ENG_1; 1128 break; 1129 case 2: 1130 hw_mode |= 1 << MODE_PORTS_PER_ENG_2; 1131 break; 1132 case 4: 1133 hw_mode |= 1 << MODE_PORTS_PER_ENG_4; 1134 break; 1135 default: 1136 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", 1137 p_hwfn->cdev->num_ports_in_engines); 1138 return -EINVAL; 1139 } 1140 1141 switch (p_hwfn->cdev->mf_mode) { 1142 case QED_MF_DEFAULT: 1143 case QED_MF_NPAR: 1144 hw_mode |= 1 << MODE_MF_SI; 1145 break; 1146 case QED_MF_OVLAN: 1147 hw_mode |= 1 << MODE_MF_SD; 1148 break; 1149 default: 1150 DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); 1151 hw_mode |= 1 << MODE_MF_SI; 1152 } 1153 1154 hw_mode |= 1 << MODE_ASIC; 1155 1156 if (p_hwfn->cdev->num_hwfns > 1) 1157 hw_mode |= 1 << MODE_100G; 1158 1159 p_hwfn->hw_info.hw_mode = hw_mode; 1160 1161 DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), 1162 "Configuring function for hw_mode: 0x%08x\n", 1163 p_hwfn->hw_info.hw_mode); 1164 1165 return 0; 1166 } 1167 1168 /* Init run time data for all PFs on an engine. */ 1169 static void qed_init_cau_rt_data(struct qed_dev *cdev) 1170 { 1171 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; 1172 int i, sb_id; 1173 1174 for_each_hwfn(cdev, i) { 1175 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1176 struct qed_igu_info *p_igu_info; 1177 struct qed_igu_block *p_block; 1178 struct cau_sb_entry sb_entry; 1179 1180 p_igu_info = p_hwfn->hw_info.p_igu_info; 1181 1182 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); 1183 sb_id++) { 1184 p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; 1185 if (!p_block->is_pf) 1186 continue; 1187 1188 qed_init_cau_sb_entry(p_hwfn, &sb_entry, 1189 p_block->function_id, 0, 0); 1190 STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); 1191 } 1192 } 1193 } 1194 1195 static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, 1196 struct qed_ptt *p_ptt) 1197 { 1198 u32 val, wr_mbs, cache_line_size; 1199 1200 val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); 1201 switch (val) { 1202 case 0: 1203 wr_mbs = 128; 1204 break; 1205 case 1: 1206 wr_mbs = 256; 1207 break; 1208 case 2: 1209 wr_mbs = 512; 1210 break; 1211 default: 1212 DP_INFO(p_hwfn, 1213 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1214 val); 1215 return; 1216 } 1217 1218 cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); 1219 switch (cache_line_size) { 1220 case 32: 1221 val = 0; 1222 break; 1223 case 64: 1224 val = 1; 1225 break; 1226 case 128: 1227 val = 2; 1228 break; 1229 case 256: 1230 val = 3; 1231 break; 1232 default: 1233 DP_INFO(p_hwfn, 1234 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", 1235 cache_line_size); 1236 } 1237 1238 if (L1_CACHE_BYTES > wr_mbs) 1239 DP_INFO(p_hwfn, 1240 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", 1241 L1_CACHE_BYTES, wr_mbs); 1242 1243 STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); 1244 } 1245 1246 static int qed_hw_init_common(struct qed_hwfn *p_hwfn, 1247 struct qed_ptt *p_ptt, int hw_mode) 1248 { 1249 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 1250 struct qed_qm_common_rt_init_params params; 1251 struct qed_dev *cdev = p_hwfn->cdev; 1252 u8 vf_id, max_num_vfs; 1253 u16 num_pfs, pf_id; 1254 u32 concrete_fid; 1255 int rc = 0; 1256 1257 qed_init_cau_rt_data(cdev); 1258 1259 /* Program GTT windows */ 1260 qed_gtt_init(p_hwfn); 1261 1262 if (p_hwfn->mcp_info) { 1263 if (p_hwfn->mcp_info->func_info.bandwidth_max) 1264 qm_info->pf_rl_en = 1; 1265 if (p_hwfn->mcp_info->func_info.bandwidth_min) 1266 qm_info->pf_wfq_en = 1; 1267 } 1268 1269 memset(&params, 0, sizeof(params)); 1270 params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines; 1271 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; 1272 params.pf_rl_en = qm_info->pf_rl_en; 1273 params.pf_wfq_en = qm_info->pf_wfq_en; 1274 params.vport_rl_en = qm_info->vport_rl_en; 1275 params.vport_wfq_en = qm_info->vport_wfq_en; 1276 params.port_params = qm_info->qm_port_params; 1277 1278 qed_qm_common_rt_init(p_hwfn, &params); 1279 1280 qed_cxt_hw_init_common(p_hwfn); 1281 1282 qed_init_cache_line_size(p_hwfn, p_ptt); 1283 1284 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); 1285 if (rc) 1286 return rc; 1287 1288 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); 1289 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); 1290 1291 if (QED_IS_BB(p_hwfn->cdev)) { 1292 num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); 1293 for (pf_id = 0; pf_id < num_pfs; pf_id++) { 1294 qed_fid_pretend(p_hwfn, p_ptt, pf_id); 1295 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1296 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1297 } 1298 /* pretend to original PF */ 1299 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1300 } 1301 1302 max_num_vfs = QED_IS_AH(cdev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; 1303 for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { 1304 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); 1305 qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); 1306 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); 1307 qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); 1308 qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); 1309 qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); 1310 } 1311 /* pretend to original PF */ 1312 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 1313 1314 return rc; 1315 } 1316 1317 static int 1318 qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, 1319 struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) 1320 { 1321 u32 dpi_bit_shift, dpi_count, dpi_page_size; 1322 u32 min_dpis; 1323 u32 n_wids; 1324 1325 /* Calculate DPI size */ 1326 n_wids = max_t(u32, QED_MIN_WIDS, n_cpus); 1327 dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids); 1328 dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); 1329 dpi_bit_shift = ilog2(dpi_page_size / 4096); 1330 dpi_count = pwm_region_size / dpi_page_size; 1331 1332 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; 1333 min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); 1334 1335 p_hwfn->dpi_size = dpi_page_size; 1336 p_hwfn->dpi_count = dpi_count; 1337 1338 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); 1339 1340 if (dpi_count < min_dpis) 1341 return -EINVAL; 1342 1343 return 0; 1344 } 1345 1346 enum QED_ROCE_EDPM_MODE { 1347 QED_ROCE_EDPM_MODE_ENABLE = 0, 1348 QED_ROCE_EDPM_MODE_FORCE_ON = 1, 1349 QED_ROCE_EDPM_MODE_DISABLE = 2, 1350 }; 1351 1352 static int 1353 qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1354 { 1355 u32 pwm_regsize, norm_regsize; 1356 u32 non_pwm_conn, min_addr_reg1; 1357 u32 db_bar_size, n_cpus = 1; 1358 u32 roce_edpm_mode; 1359 u32 pf_dems_shift; 1360 int rc = 0; 1361 u8 cond; 1362 1363 db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); 1364 if (p_hwfn->cdev->num_hwfns > 1) 1365 db_bar_size /= 2; 1366 1367 /* Calculate doorbell regions */ 1368 non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + 1369 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, 1370 NULL) + 1371 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 1372 NULL); 1373 norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096); 1374 min_addr_reg1 = norm_regsize / 4096; 1375 pwm_regsize = db_bar_size - norm_regsize; 1376 1377 /* Check that the normal and PWM sizes are valid */ 1378 if (db_bar_size < norm_regsize) { 1379 DP_ERR(p_hwfn->cdev, 1380 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", 1381 db_bar_size, norm_regsize); 1382 return -EINVAL; 1383 } 1384 1385 if (pwm_regsize < QED_MIN_PWM_REGION) { 1386 DP_ERR(p_hwfn->cdev, 1387 "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", 1388 pwm_regsize, 1389 QED_MIN_PWM_REGION, db_bar_size, norm_regsize); 1390 return -EINVAL; 1391 } 1392 1393 /* Calculate number of DPIs */ 1394 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; 1395 if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || 1396 ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { 1397 /* Either EDPM is mandatory, or we are attempting to allocate a 1398 * WID per CPU. 
1399 */ 1400 n_cpus = num_present_cpus(); 1401 rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1402 } 1403 1404 cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || 1405 (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); 1406 if (cond || p_hwfn->dcbx_no_edpm) { 1407 /* Either EDPM is disabled from user configuration, or it is 1408 * disabled via DCBx, or it is not mandatory and we failed to 1409 * allocate a WID per CPU. 1410 */ 1411 n_cpus = 1; 1412 rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); 1413 1414 if (cond) 1415 qed_rdma_dpm_bar(p_hwfn, p_ptt); 1416 } 1417 1418 p_hwfn->wid_count = (u16) n_cpus; 1419 1420 DP_INFO(p_hwfn, 1421 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", 1422 norm_regsize, 1423 pwm_regsize, 1424 p_hwfn->dpi_size, 1425 p_hwfn->dpi_count, 1426 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? 1427 "disabled" : "enabled"); 1428 1429 if (rc) { 1430 DP_ERR(p_hwfn, 1431 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n", 1432 p_hwfn->dpi_count, 1433 p_hwfn->pf_params.rdma_pf_params.min_dpis); 1434 return -EINVAL; 1435 } 1436 1437 p_hwfn->dpi_start_offset = norm_regsize; 1438 1439 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ 1440 pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); 1441 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); 1442 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); 1443 1444 return 0; 1445 } 1446 1447 static int qed_hw_init_port(struct qed_hwfn *p_hwfn, 1448 struct qed_ptt *p_ptt, int hw_mode) 1449 { 1450 return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, 1451 p_hwfn->port_id, hw_mode); 1452 } 1453 1454 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, 1455 struct qed_ptt *p_ptt, 1456 struct qed_tunnel_info *p_tunn, 1457 int hw_mode, 1458 bool b_hw_start, 1459 enum qed_int_mode int_mode, 1460 bool allow_npar_tx_switch) 1461 { 1462 u8 rel_pf_id = p_hwfn->rel_pf_id; 1463 int rc = 0; 1464 1465 if (p_hwfn->mcp_info) { 1466 struct qed_mcp_function_info *p_info; 1467 1468 p_info = &p_hwfn->mcp_info->func_info; 1469 if (p_info->bandwidth_min) 1470 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; 1471 1472 /* Update rate limit once we'll actually have a link */ 1473 p_hwfn->qm_info.pf_rl = 100000; 1474 } 1475 1476 qed_cxt_hw_init_pf(p_hwfn, p_ptt); 1477 1478 qed_int_igu_init_rt(p_hwfn); 1479 1480 /* Set VLAN in NIG if needed */ 1481 if (hw_mode & BIT(MODE_MF_SD)) { 1482 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); 1483 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 1484 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 1485 p_hwfn->hw_info.ovlan); 1486 } 1487 1488 /* Enable classification by MAC if needed */ 1489 if (hw_mode & BIT(MODE_MF_SI)) { 1490 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 1491 "Configuring TAGMAC_CLS_TYPE\n"); 1492 STORE_RT_REG(p_hwfn, 1493 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); 1494 } 1495 1496 /* Protocol Configuration */ 1497 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 1498 (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); 1499 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 1500 (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 
1 : 0); 1501 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); 1502 1503 /* Cleanup chip from previous driver if such remains exist */ 1504 rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); 1505 if (rc) 1506 return rc; 1507 1508 /* PF Init sequence */ 1509 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); 1510 if (rc) 1511 return rc; 1512 1513 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ 1514 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); 1515 if (rc) 1516 return rc; 1517 1518 /* Pure runtime initializations - directly to the HW */ 1519 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 1520 1521 rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); 1522 if (rc) 1523 return rc; 1524 1525 if (b_hw_start) { 1526 /* enable interrupts */ 1527 qed_int_igu_enable(p_hwfn, p_ptt, int_mode); 1528 1529 /* send function start command */ 1530 rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, 1531 allow_npar_tx_switch); 1532 if (rc) { 1533 DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); 1534 return rc; 1535 } 1536 if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { 1537 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); 1538 qed_wr(p_hwfn, p_ptt, 1539 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 1540 0x100); 1541 } 1542 } 1543 return rc; 1544 } 1545 1546 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, 1547 struct qed_ptt *p_ptt, 1548 u8 enable) 1549 { 1550 u32 delay_idx = 0, val, set_val = enable ? 1 : 0; 1551 1552 /* Change PF in PXP */ 1553 qed_wr(p_hwfn, p_ptt, 1554 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); 1555 1556 /* wait until value is set - try for 1 second every 50us */ 1557 for (delay_idx = 0; delay_idx < 20000; delay_idx++) { 1558 val = qed_rd(p_hwfn, p_ptt, 1559 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 1560 if (val == set_val) 1561 break; 1562 1563 usleep_range(50, 60); 1564 } 1565 1566 if (val != set_val) { 1567 DP_NOTICE(p_hwfn, 1568 "PFID_ENABLE_MASTER wasn't changed after a second\n"); 1569 return -EAGAIN; 1570 } 1571 1572 return 0; 1573 } 1574 1575 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, 1576 struct qed_ptt *p_main_ptt) 1577 { 1578 /* Read shadow of current MFW mailbox */ 1579 qed_mcp_read_mb(p_hwfn, p_main_ptt); 1580 memcpy(p_hwfn->mcp_info->mfw_mb_shadow, 1581 p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); 1582 } 1583 1584 static void 1585 qed_fill_load_req_params(struct qed_load_req_params *p_load_req, 1586 struct qed_drv_load_params *p_drv_load) 1587 { 1588 memset(p_load_req, 0, sizeof(*p_load_req)); 1589 1590 p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
1591 QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; 1592 p_load_req->timeout_val = p_drv_load->mfw_timeout_val; 1593 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; 1594 p_load_req->override_force_load = p_drv_load->override_force_load; 1595 } 1596 1597 static int qed_vf_start(struct qed_hwfn *p_hwfn, 1598 struct qed_hw_init_params *p_params) 1599 { 1600 if (p_params->p_tunn) { 1601 qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 1602 qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 1603 } 1604 1605 p_hwfn->b_int_enabled = 1; 1606 1607 return 0; 1608 } 1609 1610 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 1611 { 1612 struct qed_load_req_params load_req_params; 1613 u32 load_code, param, drv_mb_param; 1614 bool b_default_mtu = true; 1615 struct qed_hwfn *p_hwfn; 1616 int rc = 0, mfw_rc, i; 1617 1618 if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 1619 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 1620 return -EINVAL; 1621 } 1622 1623 if (IS_PF(cdev)) { 1624 rc = qed_init_fw_data(cdev, p_params->bin_fw_data); 1625 if (rc) 1626 return rc; 1627 } 1628 1629 for_each_hwfn(cdev, i) { 1630 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1631 1632 /* If management didn't provide a default, set one of our own */ 1633 if (!p_hwfn->hw_info.mtu) { 1634 p_hwfn->hw_info.mtu = 1500; 1635 b_default_mtu = false; 1636 } 1637 1638 if (IS_VF(cdev)) { 1639 qed_vf_start(p_hwfn, p_params); 1640 continue; 1641 } 1642 1643 /* Enable DMAE in PXP */ 1644 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); 1645 1646 rc = qed_calc_hw_mode(p_hwfn); 1647 if (rc) 1648 return rc; 1649 1650 qed_fill_load_req_params(&load_req_params, 1651 p_params->p_drv_load_params); 1652 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, 1653 &load_req_params); 1654 if (rc) { 1655 DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n"); 1656 return rc; 1657 } 1658 1659 load_code = load_req_params.load_code; 1660 DP_VERBOSE(p_hwfn, QED_MSG_SP, 1661 "Load request was sent. 
Load code: 0x%x\n", 1662 load_code); 1663 1664 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); 1665 1666 p_hwfn->first_on_engine = (load_code == 1667 FW_MSG_CODE_DRV_LOAD_ENGINE); 1668 1669 switch (load_code) { 1670 case FW_MSG_CODE_DRV_LOAD_ENGINE: 1671 rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, 1672 p_hwfn->hw_info.hw_mode); 1673 if (rc) 1674 break; 1675 /* Fall into */ 1676 case FW_MSG_CODE_DRV_LOAD_PORT: 1677 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, 1678 p_hwfn->hw_info.hw_mode); 1679 if (rc) 1680 break; 1681 1682 /* Fall into */ 1683 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 1684 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, 1685 p_params->p_tunn, 1686 p_hwfn->hw_info.hw_mode, 1687 p_params->b_hw_start, 1688 p_params->int_mode, 1689 p_params->allow_npar_tx_switch); 1690 break; 1691 default: 1692 DP_NOTICE(p_hwfn, 1693 "Unexpected load code [0x%08x]", load_code); 1694 rc = -EINVAL; 1695 break; 1696 } 1697 1698 if (rc) 1699 DP_NOTICE(p_hwfn, 1700 "init phase failed for loadcode 0x%x (rc %d)\n", 1701 load_code, rc); 1702 1703 /* ACK mfw regardless of success or failure of initialization */ 1704 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 1705 DRV_MSG_CODE_LOAD_DONE, 1706 0, &load_code, &param); 1707 if (rc) 1708 return rc; 1709 if (mfw_rc) { 1710 DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); 1711 return mfw_rc; 1712 } 1713 1714 /* send DCBX attention request command */ 1715 DP_VERBOSE(p_hwfn, 1716 QED_MSG_DCB, 1717 "sending phony dcbx set command to trigger DCBx attention handling\n"); 1718 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 1719 DRV_MSG_CODE_SET_DCBX, 1720 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, 1721 &load_code, &param); 1722 if (mfw_rc) { 1723 DP_NOTICE(p_hwfn, 1724 "Failed to send DCBX attention request\n"); 1725 return mfw_rc; 1726 } 1727 1728 p_hwfn->hw_init_done = true; 1729 } 1730 1731 if (IS_PF(cdev)) { 1732 p_hwfn = QED_LEADING_HWFN(cdev); 1733 drv_mb_param = STORM_FW_VERSION; 1734 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 1735 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 1736 drv_mb_param, &load_code, &param); 1737 if (rc) 1738 DP_INFO(p_hwfn, "Failed to update firmware version\n"); 1739 1740 if (!b_default_mtu) { 1741 rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, 1742 p_hwfn->hw_info.mtu); 1743 if (rc) 1744 DP_INFO(p_hwfn, 1745 "Failed to update default mtu\n"); 1746 } 1747 1748 rc = qed_mcp_ov_update_driver_state(p_hwfn, 1749 p_hwfn->p_main_ptt, 1750 QED_OV_DRIVER_STATE_DISABLED); 1751 if (rc) 1752 DP_INFO(p_hwfn, "Failed to update driver state\n"); 1753 1754 rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, 1755 QED_OV_ESWITCH_VEB); 1756 if (rc) 1757 DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); 1758 } 1759 1760 return 0; 1761 } 1762 1763 #define QED_HW_STOP_RETRY_LIMIT (10) 1764 static void qed_hw_timers_stop(struct qed_dev *cdev, 1765 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1766 { 1767 int i; 1768 1769 /* close timers */ 1770 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 1771 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 1772 1773 for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { 1774 if ((!qed_rd(p_hwfn, p_ptt, 1775 TM_REG_PF_SCAN_ACTIVE_CONN)) && 1776 (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) 1777 break; 1778 1779 /* Dependent on number of connection/tasks, possibly 1780 * 1ms sleep is required between polls 1781 */ 1782 usleep_range(1000, 2000); 1783 } 1784 1785 if (i < QED_HW_STOP_RETRY_LIMIT) 1786 return; 1787 1788 DP_NOTICE(p_hwfn, 1789 "Timers linear scans are not over [Connection %02x 
Tasks %02x]\n", 1790 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 1791 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 1792 } 1793 1794 void qed_hw_timers_stop_all(struct qed_dev *cdev) 1795 { 1796 int j; 1797 1798 for_each_hwfn(cdev, j) { 1799 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 1800 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; 1801 1802 qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 1803 } 1804 } 1805 1806 int qed_hw_stop(struct qed_dev *cdev) 1807 { 1808 struct qed_hwfn *p_hwfn; 1809 struct qed_ptt *p_ptt; 1810 int rc, rc2 = 0; 1811 int j; 1812 1813 for_each_hwfn(cdev, j) { 1814 p_hwfn = &cdev->hwfns[j]; 1815 p_ptt = p_hwfn->p_main_ptt; 1816 1817 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); 1818 1819 if (IS_VF(cdev)) { 1820 qed_vf_pf_int_cleanup(p_hwfn); 1821 rc = qed_vf_pf_reset(p_hwfn); 1822 if (rc) { 1823 DP_NOTICE(p_hwfn, 1824 "qed_vf_pf_reset failed. rc = %d.\n", 1825 rc); 1826 rc2 = -EINVAL; 1827 } 1828 continue; 1829 } 1830 1831 /* mark the hw as uninitialized... */ 1832 p_hwfn->hw_init_done = false; 1833 1834 /* Send unload command to MCP */ 1835 rc = qed_mcp_unload_req(p_hwfn, p_ptt); 1836 if (rc) { 1837 DP_NOTICE(p_hwfn, 1838 "Failed sending a UNLOAD_REQ command. rc = %d.\n", 1839 rc); 1840 rc2 = -EINVAL; 1841 } 1842 1843 qed_slowpath_irq_sync(p_hwfn); 1844 1845 /* After this point no MFW attentions are expected, e.g. prevent 1846 * race between pf stop and dcbx pf update. 1847 */ 1848 rc = qed_sp_pf_stop(p_hwfn); 1849 if (rc) { 1850 DP_NOTICE(p_hwfn, 1851 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", 1852 rc); 1853 rc2 = -EINVAL; 1854 } 1855 1856 qed_wr(p_hwfn, p_ptt, 1857 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 1858 1859 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1860 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 1861 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 1862 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1863 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 1864 1865 qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 1866 1867 /* Disable Attention Generation */ 1868 qed_int_igu_disable_int(p_hwfn, p_ptt); 1869 1870 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 1871 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 1872 1873 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); 1874 1875 /* Need to wait 1ms to guarantee SBs are cleared */ 1876 usleep_range(1000, 2000); 1877 1878 /* Disable PF in HW blocks */ 1879 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); 1880 qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); 1881 1882 qed_mcp_unload_done(p_hwfn, p_ptt); 1883 if (rc) { 1884 DP_NOTICE(p_hwfn, 1885 "Failed sending a UNLOAD_DONE command. rc = %d.\n", 1886 rc); 1887 rc2 = -EINVAL; 1888 } 1889 } 1890 1891 if (IS_PF(cdev)) { 1892 p_hwfn = QED_LEADING_HWFN(cdev); 1893 p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; 1894 1895 /* Disable DMAE in PXP - in CMT, this should only be done for 1896 * first hw-function, and only after all transactions have 1897 * stopped for all active hw-functions. 1898 */ 1899 rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); 1900 if (rc) { 1901 DP_NOTICE(p_hwfn, 1902 "qed_change_pci_hwfn failed. 
rc = %d.\n", rc); 1903 rc2 = -EINVAL; 1904 } 1905 } 1906 1907 return rc2; 1908 } 1909 1910 int qed_hw_stop_fastpath(struct qed_dev *cdev) 1911 { 1912 int j; 1913 1914 for_each_hwfn(cdev, j) { 1915 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 1916 struct qed_ptt *p_ptt; 1917 1918 if (IS_VF(cdev)) { 1919 qed_vf_pf_int_cleanup(p_hwfn); 1920 continue; 1921 } 1922 p_ptt = qed_ptt_acquire(p_hwfn); 1923 if (!p_ptt) 1924 return -EAGAIN; 1925 1926 DP_VERBOSE(p_hwfn, 1927 NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); 1928 1929 qed_wr(p_hwfn, p_ptt, 1930 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); 1931 1932 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); 1933 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); 1934 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 1935 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 1936 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 1937 1938 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 1939 1940 /* Need to wait 1ms to guarantee SBs are cleared */ 1941 usleep_range(1000, 2000); 1942 qed_ptt_release(p_hwfn, p_ptt); 1943 } 1944 1945 return 0; 1946 } 1947 1948 int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) 1949 { 1950 struct qed_ptt *p_ptt; 1951 1952 if (IS_VF(p_hwfn->cdev)) 1953 return 0; 1954 1955 p_ptt = qed_ptt_acquire(p_hwfn); 1956 if (!p_ptt) 1957 return -EAGAIN; 1958 1959 /* Re-open incoming traffic */ 1960 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); 1961 qed_ptt_release(p_hwfn, p_ptt); 1962 1963 return 0; 1964 } 1965 1966 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ 1967 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) 1968 { 1969 qed_ptt_pool_free(p_hwfn); 1970 kfree(p_hwfn->hw_info.p_igu_info); 1971 } 1972 1973 /* Setup bar access */ 1974 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) 1975 { 1976 /* clear indirect access */ 1977 if (QED_IS_AH(p_hwfn->cdev)) { 1978 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1979 PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0); 1980 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1981 PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0); 1982 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1983 PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0); 1984 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1985 PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0); 1986 } else { 1987 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1988 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); 1989 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1990 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); 1991 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1992 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); 1993 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1994 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); 1995 } 1996 1997 /* Clean Previous errors if such exist */ 1998 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 1999 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); 2000 2001 /* enable internal target-read */ 2002 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 2003 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 2004 } 2005 2006 static void get_function_id(struct qed_hwfn *p_hwfn) 2007 { 2008 /* ME Register */ 2009 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, 2010 PXP_PF_ME_OPAQUE_ADDR); 2011 2012 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); 2013 2014 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; 2015 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2016 PXP_CONCRETE_FID_PFID); 2017 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, 2018 PXP_CONCRETE_FID_PORT); 2019 2020 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, 2021 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", 2022 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); 2023 } 
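/* Descriptive note (added comment, not from the original source): the function
 * below splits this function's status blocks and L2 queue resources between
 * PF L2 queues, VF L2 queues, RDMA CNQs and iSCSI CQs, according to the
 * function's personality and its resource allocation.
 */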
2024 2025 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) 2026 { 2027 u32 *feat_num = p_hwfn->hw_info.feat_num; 2028 struct qed_sb_cnt_info sb_cnt_info; 2029 u32 non_l2_sbs = 0; 2030 2031 if (IS_ENABLED(CONFIG_QED_RDMA) && 2032 p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 2033 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide 2034 * the status blocks equally between L2 / RoCE but with 2035 * consideration as to how many l2 queues / cnqs we have. 2036 */ 2037 feat_num[QED_RDMA_CNQ] = 2038 min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2, 2039 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); 2040 2041 non_l2_sbs = feat_num[QED_RDMA_CNQ]; 2042 } 2043 2044 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || 2045 p_hwfn->hw_info.personality == QED_PCI_ETH) { 2046 /* Start by allocating VF queues, then PF's */ 2047 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 2048 qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); 2049 feat_num[QED_VF_L2_QUE] = min_t(u32, 2050 RESC_NUM(p_hwfn, QED_L2_QUEUE), 2051 sb_cnt_info.sb_iov_cnt); 2052 feat_num[QED_PF_L2_QUE] = min_t(u32, 2053 RESC_NUM(p_hwfn, QED_SB) - 2054 non_l2_sbs, 2055 RESC_NUM(p_hwfn, 2056 QED_L2_QUEUE) - 2057 FEAT_NUM(p_hwfn, 2058 QED_VF_L2_QUE)); 2059 } 2060 2061 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) 2062 feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB), 2063 RESC_NUM(p_hwfn, 2064 QED_CMDQS_CQS)); 2065 DP_VERBOSE(p_hwfn, 2066 NETIF_MSG_PROBE, 2067 "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n", 2068 (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), 2069 (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), 2070 (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), 2071 (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), 2072 RESC_NUM(p_hwfn, QED_SB)); 2073 } 2074 2075 const char *qed_hw_get_resc_name(enum qed_resources res_id) 2076 { 2077 switch (res_id) { 2078 case QED_L2_QUEUE: 2079 return "L2_QUEUE"; 2080 case QED_VPORT: 2081 return "VPORT"; 2082 case QED_RSS_ENG: 2083 return "RSS_ENG"; 2084 case QED_PQ: 2085 return "PQ"; 2086 case QED_RL: 2087 return "RL"; 2088 case QED_MAC: 2089 return "MAC"; 2090 case QED_VLAN: 2091 return "VLAN"; 2092 case QED_RDMA_CNQ_RAM: 2093 return "RDMA_CNQ_RAM"; 2094 case QED_ILT: 2095 return "ILT"; 2096 case QED_LL2_QUEUE: 2097 return "LL2_QUEUE"; 2098 case QED_CMDQS_CQS: 2099 return "CMDQS_CQS"; 2100 case QED_RDMA_STATS_QUEUE: 2101 return "RDMA_STATS_QUEUE"; 2102 case QED_BDQ: 2103 return "BDQ"; 2104 case QED_SB: 2105 return "SB"; 2106 default: 2107 return "UNKNOWN_RESOURCE"; 2108 } 2109 } 2110 2111 static int 2112 __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, 2113 struct qed_ptt *p_ptt, 2114 enum qed_resources res_id, 2115 u32 resc_max_val, u32 *p_mcp_resp) 2116 { 2117 int rc; 2118 2119 rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, 2120 resc_max_val, p_mcp_resp); 2121 if (rc) { 2122 DP_NOTICE(p_hwfn, 2123 "MFW response failure for a max value setting of resource %d [%s]\n", 2124 res_id, qed_hw_get_resc_name(res_id)); 2125 return rc; 2126 } 2127 2128 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) 2129 DP_INFO(p_hwfn, 2130 "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", 2131 res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); 2132 2133 return 0; 2134 } 2135 2136 static int 2137 qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2138 { 2139 bool b_ah = QED_IS_AH(p_hwfn->cdev); 2140 u32 resc_max_val, mcp_resp; 2141 u8 res_id; 2142 int rc; 2143 2144 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 2145 switch (res_id) { 2146 case QED_LL2_QUEUE: 2147 resc_max_val = MAX_NUM_LL2_RX_QUEUES; 2148 break; 2149 case QED_RDMA_CNQ_RAM: 2150 /* No need for a case for QED_CMDQS_CQS since 2151 * CNQ/CMDQS are the same resource. 2152 */ 2153 resc_max_val = NUM_OF_CMDQS_CQS; 2154 break; 2155 case QED_RDMA_STATS_QUEUE: 2156 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 2157 : RDMA_NUM_STATISTIC_COUNTERS_BB; 2158 break; 2159 case QED_BDQ: 2160 resc_max_val = BDQ_NUM_RESOURCES; 2161 break; 2162 default: 2163 continue; 2164 } 2165 2166 rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, 2167 resc_max_val, &mcp_resp); 2168 if (rc) 2169 return rc; 2170 2171 /* There's no point to continue to the next resource if the 2172 * command is not supported by the MFW. 2173 * We do continue if the command is supported but the resource 2174 * is unknown to the MFW. Such a resource will be later 2175 * configured with the default allocation values. 2176 */ 2177 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) 2178 return -EINVAL; 2179 } 2180 2181 return 0; 2182 } 2183 2184 static 2185 int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, 2186 enum qed_resources res_id, 2187 u32 *p_resc_num, u32 *p_resc_start) 2188 { 2189 u8 num_funcs = p_hwfn->num_funcs_on_engine; 2190 bool b_ah = QED_IS_AH(p_hwfn->cdev); 2191 struct qed_sb_cnt_info sb_cnt_info; 2192 2193 switch (res_id) { 2194 case QED_L2_QUEUE: 2195 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : 2196 MAX_NUM_L2_QUEUES_BB) / num_funcs; 2197 break; 2198 case QED_VPORT: 2199 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : 2200 MAX_NUM_VPORTS_BB) / num_funcs; 2201 break; 2202 case QED_RSS_ENG: 2203 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : 2204 ETH_RSS_ENGINE_NUM_BB) / num_funcs; 2205 break; 2206 case QED_PQ: 2207 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : 2208 MAX_QM_TX_QUEUES_BB) / num_funcs; 2209 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ 2210 break; 2211 case QED_RL: 2212 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; 2213 break; 2214 case QED_MAC: 2215 case QED_VLAN: 2216 /* Each VFC resource can accommodate both a MAC and a VLAN */ 2217 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; 2218 break; 2219 case QED_ILT: 2220 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : 2221 PXP_NUM_ILT_RECORDS_BB) / num_funcs; 2222 break; 2223 case QED_LL2_QUEUE: 2224 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; 2225 break; 2226 case QED_RDMA_CNQ_RAM: 2227 case QED_CMDQS_CQS: 2228 /* CNQ/CMDQS are the same resource */ 2229 *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; 2230 break; 2231 case QED_RDMA_STATS_QUEUE: 2232 *p_resc_num = (b_ah ? 
RDMA_NUM_STATISTIC_COUNTERS_K2 : 2233 RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs; 2234 break; 2235 case QED_BDQ: 2236 if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && 2237 p_hwfn->hw_info.personality != QED_PCI_FCOE) 2238 *p_resc_num = 0; 2239 else 2240 *p_resc_num = 1; 2241 break; 2242 case QED_SB: 2243 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 2244 qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); 2245 *p_resc_num = sb_cnt_info.sb_cnt; 2246 break; 2247 default: 2248 return -EINVAL; 2249 } 2250 2251 switch (res_id) { 2252 case QED_BDQ: 2253 if (!*p_resc_num) 2254 *p_resc_start = 0; 2255 else if (p_hwfn->cdev->num_ports_in_engines == 4) 2256 *p_resc_start = p_hwfn->port_id; 2257 else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) 2258 *p_resc_start = p_hwfn->port_id; 2259 else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) 2260 *p_resc_start = p_hwfn->port_id + 2; 2261 break; 2262 default: 2263 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; 2264 break; 2265 } 2266 2267 return 0; 2268 } 2269 2270 static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, 2271 enum qed_resources res_id) 2272 { 2273 u32 dflt_resc_num = 0, dflt_resc_start = 0; 2274 u32 mcp_resp, *p_resc_num, *p_resc_start; 2275 int rc; 2276 2277 p_resc_num = &RESC_NUM(p_hwfn, res_id); 2278 p_resc_start = &RESC_START(p_hwfn, res_id); 2279 2280 rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, 2281 &dflt_resc_start); 2282 if (rc) { 2283 DP_ERR(p_hwfn, 2284 "Failed to get default amount for resource %d [%s]\n", 2285 res_id, qed_hw_get_resc_name(res_id)); 2286 return rc; 2287 } 2288 2289 rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, 2290 &mcp_resp, p_resc_num, p_resc_start); 2291 if (rc) { 2292 DP_NOTICE(p_hwfn, 2293 "MFW response failure for an allocation request for resource %d [%s]\n", 2294 res_id, qed_hw_get_resc_name(res_id)); 2295 return rc; 2296 } 2297 2298 /* Default driver values are applied in the following cases: 2299 * - The resource allocation MB command is not supported by the MFW 2300 * - There is an internal error in the MFW while processing the request 2301 * - The resource ID is unknown to the MFW 2302 */ 2303 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { 2304 DP_INFO(p_hwfn, 2305 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", 2306 res_id, 2307 qed_hw_get_resc_name(res_id), 2308 mcp_resp, dflt_resc_num, dflt_resc_start); 2309 *p_resc_num = dflt_resc_num; 2310 *p_resc_start = dflt_resc_start; 2311 goto out; 2312 } 2313 2314 /* Special handling for status blocks; Would be revised in future */ 2315 if (res_id == QED_SB) { 2316 *p_resc_num -= 1; 2317 *p_resc_start -= p_hwfn->enabled_func_idx; 2318 } 2319 out: 2320 /* PQs have to divide by 8 [that's the HW granularity]. 2321 * Reduce number so it would fit. 
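* For example (made-up values): a Number of 0x2a and a Start of 0x12 would be trimmed below to 0x28 and 0x10.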
2322 */ 2323 if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { 2324 DP_INFO(p_hwfn, 2325 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", 2326 *p_resc_num, 2327 (*p_resc_num) & ~0x7, 2328 *p_resc_start, (*p_resc_start) & ~0x7); 2329 *p_resc_num &= ~0x7; 2330 *p_resc_start &= ~0x7; 2331 } 2332 2333 return 0; 2334 } 2335 2336 static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) 2337 { 2338 int rc; 2339 u8 res_id; 2340 2341 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { 2342 rc = __qed_hw_set_resc_info(p_hwfn, res_id); 2343 if (rc) 2344 return rc; 2345 } 2346 2347 return 0; 2348 } 2349 2350 static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2351 { 2352 struct qed_resc_unlock_params resc_unlock_params; 2353 struct qed_resc_lock_params resc_lock_params; 2354 bool b_ah = QED_IS_AH(p_hwfn->cdev); 2355 u8 res_id; 2356 int rc; 2357 2358 /* Setting the max values of the soft resources and the following 2359 * resources allocation queries should be atomic. Since several PFs can 2360 * run in parallel - a resource lock is needed. 2361 * If either the resource lock or resource set value commands are not 2362 * supported - skip the the max values setting, release the lock if 2363 * needed, and proceed to the queries. Other failures, including a 2364 * failure to acquire the lock, will cause this function to fail. 2365 */ 2366 qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, 2367 QED_RESC_LOCK_RESC_ALLOC, false); 2368 2369 rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); 2370 if (rc && rc != -EINVAL) { 2371 return rc; 2372 } else if (rc == -EINVAL) { 2373 DP_INFO(p_hwfn, 2374 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); 2375 } else if (!rc && !resc_lock_params.b_granted) { 2376 DP_NOTICE(p_hwfn, 2377 "Failed to acquire the resource lock for the resource allocation commands\n"); 2378 return -EBUSY; 2379 } else { 2380 rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt); 2381 if (rc && rc != -EINVAL) { 2382 DP_NOTICE(p_hwfn, 2383 "Failed to set the max values of the soft resources\n"); 2384 goto unlock_and_exit; 2385 } else if (rc == -EINVAL) { 2386 DP_INFO(p_hwfn, 2387 "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); 2388 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, 2389 &resc_unlock_params); 2390 if (rc) 2391 DP_INFO(p_hwfn, 2392 "Failed to release the resource lock for the resource allocation commands\n"); 2393 } 2394 } 2395 2396 rc = qed_hw_set_resc_info(p_hwfn); 2397 if (rc) 2398 goto unlock_and_exit; 2399 2400 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { 2401 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); 2402 if (rc) 2403 DP_INFO(p_hwfn, 2404 "Failed to release the resource lock for the resource allocation commands\n"); 2405 } 2406 2407 /* Sanity for ILT */ 2408 if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || 2409 (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { 2410 DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", 2411 RESC_START(p_hwfn, QED_ILT), 2412 RESC_END(p_hwfn, QED_ILT) - 1); 2413 return -EINVAL; 2414 } 2415 2416 qed_hw_set_feat(p_hwfn); 2417 2418 for (res_id = 0; res_id < QED_MAX_RESC; res_id++) 2419 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", 2420 qed_hw_get_resc_name(res_id), 2421 RESC_NUM(p_hwfn, res_id), 2422 RESC_START(p_hwfn, res_id)); 2423 2424 return 0; 2425 2426 
unlock_and_exit: 2427 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) 2428 qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); 2429 return rc; 2430 } 2431 2432 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2433 { 2434 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; 2435 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; 2436 struct qed_mcp_link_params *link; 2437 2438 /* Read global nvm_cfg address */ 2439 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); 2440 2441 /* Verify MCP has initialized it */ 2442 if (!nvm_cfg_addr) { 2443 DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); 2444 return -EINVAL; 2445 } 2446 2447 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 2448 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 2449 2450 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2451 offsetof(struct nvm_cfg1, glob) + 2452 offsetof(struct nvm_cfg1_glob, core_cfg); 2453 2454 core_cfg = qed_rd(p_hwfn, p_ptt, addr); 2455 2456 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> 2457 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { 2458 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: 2459 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; 2460 break; 2461 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 2462 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; 2463 break; 2464 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: 2465 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; 2466 break; 2467 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: 2468 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; 2469 break; 2470 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: 2471 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; 2472 break; 2473 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: 2474 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; 2475 break; 2476 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: 2477 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; 2478 break; 2479 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: 2480 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; 2481 break; 2482 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: 2483 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G; 2484 break; 2485 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: 2486 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; 2487 break; 2488 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: 2489 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G; 2490 break; 2491 default: 2492 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); 2493 break; 2494 } 2495 2496 /* Read default link configuration */ 2497 link = &p_hwfn->mcp_info->link_input; 2498 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2499 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); 2500 link_temp = qed_rd(p_hwfn, p_ptt, 2501 port_cfg_addr + 2502 offsetof(struct nvm_cfg1_port, speed_cap_mask)); 2503 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; 2504 link->speed.advertised_speeds = link_temp; 2505 2506 link_temp = link->speed.advertised_speeds; 2507 p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; 2508 2509 link_temp = qed_rd(p_hwfn, p_ptt, 2510 port_cfg_addr + 2511 offsetof(struct nvm_cfg1_port, link_settings)); 2512 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> 2513 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { 2514 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: 2515 link->speed.autoneg = true; 2516 break; 2517 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: 2518 link->speed.forced_speed = 1000; 2519 break; 
2520 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: 2521 link->speed.forced_speed = 10000; 2522 break; 2523 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: 2524 link->speed.forced_speed = 25000; 2525 break; 2526 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: 2527 link->speed.forced_speed = 40000; 2528 break; 2529 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: 2530 link->speed.forced_speed = 50000; 2531 break; 2532 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: 2533 link->speed.forced_speed = 100000; 2534 break; 2535 default: 2536 DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); 2537 } 2538 2539 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; 2540 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; 2541 link->pause.autoneg = !!(link_temp & 2542 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); 2543 link->pause.forced_rx = !!(link_temp & 2544 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); 2545 link->pause.forced_tx = !!(link_temp & 2546 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); 2547 link->loopback_mode = 0; 2548 2549 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 2550 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", 2551 link->speed.forced_speed, link->speed.advertised_speeds, 2552 link->speed.autoneg, link->pause.autoneg); 2553 2554 /* Read Multi-function information from shmem */ 2555 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2556 offsetof(struct nvm_cfg1, glob) + 2557 offsetof(struct nvm_cfg1_glob, generic_cont0); 2558 2559 generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); 2560 2561 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 2562 NVM_CFG1_GLOB_MF_MODE_OFFSET; 2563 2564 switch (mf_mode) { 2565 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 2566 p_hwfn->cdev->mf_mode = QED_MF_OVLAN; 2567 break; 2568 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 2569 p_hwfn->cdev->mf_mode = QED_MF_NPAR; 2570 break; 2571 case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 2572 p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; 2573 break; 2574 } 2575 DP_INFO(p_hwfn, "Multi function mode is %08x\n", 2576 p_hwfn->cdev->mf_mode); 2577 2578 /* Read Multi-function information from shmem */ 2579 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2580 offsetof(struct nvm_cfg1, glob) + 2581 offsetof(struct nvm_cfg1_glob, device_capabilities); 2582 2583 device_capabilities = qed_rd(p_hwfn, p_ptt, addr); 2584 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) 2585 __set_bit(QED_DEV_CAP_ETH, 2586 &p_hwfn->hw_info.device_capabilities); 2587 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) 2588 __set_bit(QED_DEV_CAP_FCOE, 2589 &p_hwfn->hw_info.device_capabilities); 2590 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) 2591 __set_bit(QED_DEV_CAP_ISCSI, 2592 &p_hwfn->hw_info.device_capabilities); 2593 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) 2594 __set_bit(QED_DEV_CAP_ROCE, 2595 &p_hwfn->hw_info.device_capabilities); 2596 2597 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); 2598 } 2599 2600 static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2601 { 2602 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; 2603 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; 2604 struct qed_dev *cdev = p_hwfn->cdev; 2605 2606 num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; 2607 2608 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values 2609 * in the other bits are selected. 2610 * Bits 1-15 are for functions 1-15, respectively, and their value is 2611 * '0' only for enabled functions (function 0 always exists and 2612 * enabled). 
2613 * In case of CMT, only the "even" functions are enabled, and thus the 2614 * number of functions for both hwfns is learnt from the same bits. 2615 */ 2616 reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); 2617 2618 if (reg_function_hide & 0x1) { 2619 if (QED_IS_BB(cdev)) { 2620 if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { 2621 num_funcs = 0; 2622 eng_mask = 0xaaaa; 2623 } else { 2624 num_funcs = 1; 2625 eng_mask = 0x5554; 2626 } 2627 } else { 2628 num_funcs = 1; 2629 eng_mask = 0xfffe; 2630 } 2631 2632 /* Get the number of the enabled functions on the engine */ 2633 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; 2634 while (tmp) { 2635 if (tmp & 0x1) 2636 num_funcs++; 2637 tmp >>= 0x1; 2638 } 2639 2640 /* Get the PF index within the enabled functions */ 2641 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; 2642 tmp = reg_function_hide & eng_mask & low_pfs_mask; 2643 while (tmp) { 2644 if (tmp & 0x1) 2645 enabled_func_idx--; 2646 tmp >>= 0x1; 2647 } 2648 } 2649 2650 p_hwfn->num_funcs_on_engine = num_funcs; 2651 p_hwfn->enabled_func_idx = enabled_func_idx; 2652 2653 DP_VERBOSE(p_hwfn, 2654 NETIF_MSG_PROBE, 2655 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", 2656 p_hwfn->rel_pf_id, 2657 p_hwfn->abs_pf_id, 2658 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); 2659 } 2660 2661 static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, 2662 struct qed_ptt *p_ptt) 2663 { 2664 u32 port_mode; 2665 2666 port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); 2667 2668 if (port_mode < 3) { 2669 p_hwfn->cdev->num_ports_in_engines = 1; 2670 } else if (port_mode <= 5) { 2671 p_hwfn->cdev->num_ports_in_engines = 2; 2672 } else { 2673 DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", 2674 p_hwfn->cdev->num_ports_in_engines); 2675 2676 /* Default num_ports_in_engines to something */ 2677 p_hwfn->cdev->num_ports_in_engines = 1; 2678 } 2679 } 2680 2681 static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, 2682 struct qed_ptt *p_ptt) 2683 { 2684 u32 port; 2685 int i; 2686 2687 p_hwfn->cdev->num_ports_in_engines = 0; 2688 2689 for (i = 0; i < MAX_NUM_PORTS_K2; i++) { 2690 port = qed_rd(p_hwfn, p_ptt, 2691 CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); 2692 if (port & 1) 2693 p_hwfn->cdev->num_ports_in_engines++; 2694 } 2695 2696 if (!p_hwfn->cdev->num_ports_in_engines) { 2697 DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); 2698 2699 /* Default num_ports_in_engine to something */ 2700 p_hwfn->cdev->num_ports_in_engines = 1; 2701 } 2702 } 2703 2704 static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2705 { 2706 if (QED_IS_BB(p_hwfn->cdev)) 2707 qed_hw_info_port_num_bb(p_hwfn, p_ptt); 2708 else 2709 qed_hw_info_port_num_ah(p_hwfn, p_ptt); 2710 } 2711 2712 static int 2713 qed_get_hw_info(struct qed_hwfn *p_hwfn, 2714 struct qed_ptt *p_ptt, 2715 enum qed_pci_personality personality) 2716 { 2717 int rc; 2718 2719 /* Since all information is common, only first hwfns should do this */ 2720 if (IS_LEAD_HWFN(p_hwfn)) { 2721 rc = qed_iov_hw_info(p_hwfn); 2722 if (rc) 2723 return rc; 2724 } 2725 2726 qed_hw_info_port_num(p_hwfn, p_ptt); 2727 2728 qed_hw_get_nvm_info(p_hwfn, p_ptt); 2729 2730 rc = qed_int_igu_read_cam(p_hwfn, p_ptt); 2731 if (rc) 2732 return rc; 2733 2734 if (qed_mcp_is_init(p_hwfn)) 2735 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, 2736 p_hwfn->mcp_info->func_info.mac); 2737 else 2738 eth_random_addr(p_hwfn->hw_info.hw_mac_addr); 2739 2740 if (qed_mcp_is_init(p_hwfn)) { 2741 if 
(p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) 2742 p_hwfn->hw_info.ovlan = 2743 p_hwfn->mcp_info->func_info.ovlan; 2744 2745 qed_mcp_cmd_port_init(p_hwfn, p_ptt); 2746 } 2747 2748 if (qed_mcp_is_init(p_hwfn)) { 2749 enum qed_pci_personality protocol; 2750 2751 protocol = p_hwfn->mcp_info->func_info.protocol; 2752 p_hwfn->hw_info.personality = protocol; 2753 } 2754 2755 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; 2756 p_hwfn->hw_info.num_active_tc = 1; 2757 2758 qed_get_num_funcs(p_hwfn, p_ptt); 2759 2760 if (qed_mcp_is_init(p_hwfn)) 2761 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; 2762 2763 return qed_hw_get_resc(p_hwfn, p_ptt); 2764 } 2765 2766 static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2767 { 2768 struct qed_dev *cdev = p_hwfn->cdev; 2769 u16 device_id_mask; 2770 u32 tmp; 2771 2772 /* Read Vendor Id / Device Id */ 2773 pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); 2774 pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); 2775 2776 /* Determine type */ 2777 device_id_mask = cdev->device_id & QED_DEV_ID_MASK; 2778 switch (device_id_mask) { 2779 case QED_DEV_ID_MASK_BB: 2780 cdev->type = QED_DEV_TYPE_BB; 2781 break; 2782 case QED_DEV_ID_MASK_AH: 2783 cdev->type = QED_DEV_TYPE_AH; 2784 break; 2785 default: 2786 DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); 2787 return -EBUSY; 2788 } 2789 2790 cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); 2791 cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); 2792 2793 MASK_FIELD(CHIP_REV, cdev->chip_rev); 2794 2795 /* Learn number of HW-functions */ 2796 tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); 2797 2798 if (tmp & (1 << p_hwfn->rel_pf_id)) { 2799 DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); 2800 cdev->num_hwfns = 2; 2801 } else { 2802 cdev->num_hwfns = 1; 2803 } 2804 2805 cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, 2806 MISCS_REG_CHIP_TEST_REG) >> 4; 2807 MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); 2808 cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); 2809 MASK_FIELD(CHIP_METAL, cdev->chip_metal); 2810 2811 DP_INFO(cdev->hwfns, 2812 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 2813 QED_IS_BB(cdev) ? 
"BB" : "AH", 2814 'A' + cdev->chip_rev, 2815 (int)cdev->chip_metal, 2816 cdev->chip_num, cdev->chip_rev, 2817 cdev->chip_bond_id, cdev->chip_metal); 2818 2819 if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { 2820 DP_NOTICE(cdev->hwfns, 2821 "The chip type/rev (BB A0) is not supported!\n"); 2822 return -EINVAL; 2823 } 2824 2825 return 0; 2826 } 2827 2828 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, 2829 void __iomem *p_regview, 2830 void __iomem *p_doorbells, 2831 enum qed_pci_personality personality) 2832 { 2833 int rc = 0; 2834 2835 /* Split PCI bars evenly between hwfns */ 2836 p_hwfn->regview = p_regview; 2837 p_hwfn->doorbells = p_doorbells; 2838 2839 if (IS_VF(p_hwfn->cdev)) 2840 return qed_vf_hw_prepare(p_hwfn); 2841 2842 /* Validate that chip access is feasible */ 2843 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { 2844 DP_ERR(p_hwfn, 2845 "Reading the ME register returns all Fs; Preventing further chip access\n"); 2846 return -EINVAL; 2847 } 2848 2849 get_function_id(p_hwfn); 2850 2851 /* Allocate PTT pool */ 2852 rc = qed_ptt_pool_alloc(p_hwfn); 2853 if (rc) 2854 goto err0; 2855 2856 /* Allocate the main PTT */ 2857 p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 2858 2859 /* First hwfn learns basic information, e.g., number of hwfns */ 2860 if (!p_hwfn->my_id) { 2861 rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); 2862 if (rc) 2863 goto err1; 2864 } 2865 2866 qed_hw_hwfn_prepare(p_hwfn); 2867 2868 /* Initialize MCP structure */ 2869 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); 2870 if (rc) { 2871 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); 2872 goto err1; 2873 } 2874 2875 /* Read the device configuration information from the HW and SHMEM */ 2876 rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); 2877 if (rc) { 2878 DP_NOTICE(p_hwfn, "Failed to get HW information\n"); 2879 goto err2; 2880 } 2881 2882 /* Sending a mailbox to the MFW should be done after qed_get_hw_info() 2883 * is called as it sets the ports number in an engine. 
2884 */ 2885 if (IS_LEAD_HWFN(p_hwfn)) { 2886 rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); 2887 if (rc) 2888 DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); 2889 } 2890 2891 /* Allocate the init RT array and initialize the init-ops engine */ 2892 rc = qed_init_alloc(p_hwfn); 2893 if (rc) 2894 goto err2; 2895 2896 return rc; 2897 err2: 2898 if (IS_LEAD_HWFN(p_hwfn)) 2899 qed_iov_free_hw_info(p_hwfn->cdev); 2900 qed_mcp_free(p_hwfn); 2901 err1: 2902 qed_hw_hwfn_free(p_hwfn); 2903 err0: 2904 return rc; 2905 } 2906 2907 int qed_hw_prepare(struct qed_dev *cdev, 2908 int personality) 2909 { 2910 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2911 int rc; 2912 2913 /* Store the precompiled init data ptrs */ 2914 if (IS_PF(cdev)) 2915 qed_init_iro_array(cdev); 2916 2917 /* Initialize the first hwfn - will learn number of hwfns */ 2918 rc = qed_hw_prepare_single(p_hwfn, 2919 cdev->regview, 2920 cdev->doorbells, personality); 2921 if (rc) 2922 return rc; 2923 2924 personality = p_hwfn->hw_info.personality; 2925 2926 /* Initialize the rest of the hwfns */ 2927 if (cdev->num_hwfns > 1) { 2928 void __iomem *p_regview, *p_doorbell; 2929 u8 __iomem *addr; 2930 2931 /* adjust bar offset for second engine */ 2932 addr = cdev->regview + 2933 qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 2934 BAR_ID_0) / 2; 2935 p_regview = addr; 2936 2937 addr = cdev->doorbells + 2938 qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, 2939 BAR_ID_1) / 2; 2940 p_doorbell = addr; 2941 2942 /* prepare second hw function */ 2943 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, 2944 p_doorbell, personality); 2945 2946 /* in case of error, need to free the previously 2947 * initiliazed hwfn 0. 2948 */ 2949 if (rc) { 2950 if (IS_PF(cdev)) { 2951 qed_init_free(p_hwfn); 2952 qed_mcp_free(p_hwfn); 2953 qed_hw_hwfn_free(p_hwfn); 2954 } 2955 } 2956 } 2957 2958 return rc; 2959 } 2960 2961 void qed_hw_remove(struct qed_dev *cdev) 2962 { 2963 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2964 int i; 2965 2966 if (IS_PF(cdev)) 2967 qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, 2968 QED_OV_DRIVER_STATE_NOT_LOADED); 2969 2970 for_each_hwfn(cdev, i) { 2971 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2972 2973 if (IS_VF(cdev)) { 2974 qed_vf_pf_release(p_hwfn); 2975 continue; 2976 } 2977 2978 qed_init_free(p_hwfn); 2979 qed_hw_hwfn_free(p_hwfn); 2980 qed_mcp_free(p_hwfn); 2981 } 2982 2983 qed_iov_free_hw_info(cdev); 2984 } 2985 2986 static void qed_chain_free_next_ptr(struct qed_dev *cdev, 2987 struct qed_chain *p_chain) 2988 { 2989 void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; 2990 dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; 2991 struct qed_chain_next *p_next; 2992 u32 size, i; 2993 2994 if (!p_virt) 2995 return; 2996 2997 size = p_chain->elem_size * p_chain->usable_per_page; 2998 2999 for (i = 0; i < p_chain->page_cnt; i++) { 3000 if (!p_virt) 3001 break; 3002 3003 p_next = (struct qed_chain_next *)((u8 *)p_virt + size); 3004 p_virt_next = p_next->next_virt; 3005 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); 3006 3007 dma_free_coherent(&cdev->pdev->dev, 3008 QED_CHAIN_PAGE_SIZE, p_virt, p_phys); 3009 3010 p_virt = p_virt_next; 3011 p_phys = p_phys_next; 3012 } 3013 } 3014 3015 static void qed_chain_free_single(struct qed_dev *cdev, 3016 struct qed_chain *p_chain) 3017 { 3018 if (!p_chain->p_virt_addr) 3019 return; 3020 3021 dma_free_coherent(&cdev->pdev->dev, 3022 QED_CHAIN_PAGE_SIZE, 3023 p_chain->p_virt_addr, p_chain->p_phys_addr); 3024 } 3025 3026 static void 
qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 3027 { 3028 void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; 3029 u32 page_cnt = p_chain->page_cnt, i, pbl_size; 3030 u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; 3031 3032 if (!pp_virt_addr_tbl) 3033 return; 3034 3035 if (!p_pbl_virt) 3036 goto out; 3037 3038 for (i = 0; i < page_cnt; i++) { 3039 if (!pp_virt_addr_tbl[i]) 3040 break; 3041 3042 dma_free_coherent(&cdev->pdev->dev, 3043 QED_CHAIN_PAGE_SIZE, 3044 pp_virt_addr_tbl[i], 3045 *(dma_addr_t *)p_pbl_virt); 3046 3047 p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 3048 } 3049 3050 pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 3051 dma_free_coherent(&cdev->pdev->dev, 3052 pbl_size, 3053 p_chain->pbl_sp.p_virt_table, 3054 p_chain->pbl_sp.p_phys_table); 3055 out: 3056 vfree(p_chain->pbl.pp_virt_addr_tbl); 3057 } 3058 3059 void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) 3060 { 3061 switch (p_chain->mode) { 3062 case QED_CHAIN_MODE_NEXT_PTR: 3063 qed_chain_free_next_ptr(cdev, p_chain); 3064 break; 3065 case QED_CHAIN_MODE_SINGLE: 3066 qed_chain_free_single(cdev, p_chain); 3067 break; 3068 case QED_CHAIN_MODE_PBL: 3069 qed_chain_free_pbl(cdev, p_chain); 3070 break; 3071 } 3072 } 3073 3074 static int 3075 qed_chain_alloc_sanity_check(struct qed_dev *cdev, 3076 enum qed_chain_cnt_type cnt_type, 3077 size_t elem_size, u32 page_cnt) 3078 { 3079 u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; 3080 3081 /* The actual chain size can be larger than the maximal possible value 3082 * after rounding up the requested elements number to pages, and after 3083 * taking into acount the unusuable elements (next-ptr elements). 3084 * The size of a "u16" chain can be (U16_MAX + 1) since the chain 3085 * size/capacity fields are of a u32 type. 3086 */ 3087 if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && 3088 chain_size > ((u32)U16_MAX + 1)) || 3089 (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { 3090 DP_NOTICE(cdev, 3091 "The actual chain size (0x%llx) is larger than the maximal possible value\n", 3092 chain_size); 3093 return -EINVAL; 3094 } 3095 3096 return 0; 3097 } 3098 3099 static int 3100 qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) 3101 { 3102 void *p_virt = NULL, *p_virt_prev = NULL; 3103 dma_addr_t p_phys = 0; 3104 u32 i; 3105 3106 for (i = 0; i < p_chain->page_cnt; i++) { 3107 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3108 QED_CHAIN_PAGE_SIZE, 3109 &p_phys, GFP_KERNEL); 3110 if (!p_virt) 3111 return -ENOMEM; 3112 3113 if (i == 0) { 3114 qed_chain_init_mem(p_chain, p_virt, p_phys); 3115 qed_chain_reset(p_chain); 3116 } else { 3117 qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 3118 p_virt, p_phys); 3119 } 3120 3121 p_virt_prev = p_virt; 3122 } 3123 /* Last page's next element should point to the beginning of the 3124 * chain. 
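* (The call below points it back at the first page's virtual/physical address, closing the ring.)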
3125 */ 3126 qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, 3127 p_chain->p_virt_addr, 3128 p_chain->p_phys_addr); 3129 3130 return 0; 3131 } 3132 3133 static int 3134 qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) 3135 { 3136 dma_addr_t p_phys = 0; 3137 void *p_virt = NULL; 3138 3139 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3140 QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); 3141 if (!p_virt) 3142 return -ENOMEM; 3143 3144 qed_chain_init_mem(p_chain, p_virt, p_phys); 3145 qed_chain_reset(p_chain); 3146 3147 return 0; 3148 } 3149 3150 static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) 3151 { 3152 u32 page_cnt = p_chain->page_cnt, size, i; 3153 dma_addr_t p_phys = 0, p_pbl_phys = 0; 3154 void **pp_virt_addr_tbl = NULL; 3155 u8 *p_pbl_virt = NULL; 3156 void *p_virt = NULL; 3157 3158 size = page_cnt * sizeof(*pp_virt_addr_tbl); 3159 pp_virt_addr_tbl = vzalloc(size); 3160 if (!pp_virt_addr_tbl) 3161 return -ENOMEM; 3162 3163 /* The allocation of the PBL table is done with its full size, since it 3164 * is expected to be successive. 3165 * qed_chain_init_pbl_mem() is called even in a case of an allocation 3166 * failure, since pp_virt_addr_tbl was previously allocated, and it 3167 * should be saved to allow its freeing during the error flow. 3168 */ 3169 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; 3170 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, 3171 size, &p_pbl_phys, GFP_KERNEL); 3172 qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, 3173 pp_virt_addr_tbl); 3174 if (!p_pbl_virt) 3175 return -ENOMEM; 3176 3177 for (i = 0; i < page_cnt; i++) { 3178 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 3179 QED_CHAIN_PAGE_SIZE, 3180 &p_phys, GFP_KERNEL); 3181 if (!p_virt) 3182 return -ENOMEM; 3183 3184 if (i == 0) { 3185 qed_chain_init_mem(p_chain, p_virt, p_phys); 3186 qed_chain_reset(p_chain); 3187 } 3188 3189 /* Fill the PBL table with the physical address of the page */ 3190 *(dma_addr_t *)p_pbl_virt = p_phys; 3191 /* Keep the virtual address of the page */ 3192 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; 3193 3194 p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; 3195 } 3196 3197 return 0; 3198 } 3199 3200 int qed_chain_alloc(struct qed_dev *cdev, 3201 enum qed_chain_use_mode intended_use, 3202 enum qed_chain_mode mode, 3203 enum qed_chain_cnt_type cnt_type, 3204 u32 num_elems, size_t elem_size, struct qed_chain *p_chain) 3205 { 3206 u32 page_cnt; 3207 int rc = 0; 3208 3209 if (mode == QED_CHAIN_MODE_SINGLE) 3210 page_cnt = 1; 3211 else 3212 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); 3213 3214 rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); 3215 if (rc) { 3216 DP_NOTICE(cdev, 3217 "Cannot allocate a chain with the given arguments:\n"); 3218 DP_NOTICE(cdev, 3219 "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", 3220 intended_use, mode, cnt_type, num_elems, elem_size); 3221 return rc; 3222 } 3223 3224 qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, 3225 mode, cnt_type); 3226 3227 switch (mode) { 3228 case QED_CHAIN_MODE_NEXT_PTR: 3229 rc = qed_chain_alloc_next_ptr(cdev, p_chain); 3230 break; 3231 case QED_CHAIN_MODE_SINGLE: 3232 rc = qed_chain_alloc_single(cdev, p_chain); 3233 break; 3234 case QED_CHAIN_MODE_PBL: 3235 rc = qed_chain_alloc_pbl(cdev, p_chain); 3236 break; 3237 } 3238 if (rc) 3239 goto nomem; 3240 3241 return 0; 3242 3243 nomem: 3244 qed_chain_free(cdev, p_chain); 3245 return rc; 3246 } 3247 3248 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 
src_id, u16 *dst_id) 3249 { 3250 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { 3251 u16 min, max; 3252 3253 min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); 3254 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); 3255 DP_NOTICE(p_hwfn, 3256 "l2_queue id [%d] is not valid, available indices [%d - %d]\n", 3257 src_id, min, max); 3258 3259 return -EINVAL; 3260 } 3261 3262 *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; 3263 3264 return 0; 3265 } 3266 3267 int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) 3268 { 3269 if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { 3270 u8 min, max; 3271 3272 min = (u8)RESC_START(p_hwfn, QED_VPORT); 3273 max = min + RESC_NUM(p_hwfn, QED_VPORT); 3274 DP_NOTICE(p_hwfn, 3275 "vport id [%d] is not valid, available indices [%d - %d]\n", 3276 src_id, min, max); 3277 3278 return -EINVAL; 3279 } 3280 3281 *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; 3282 3283 return 0; 3284 } 3285 3286 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) 3287 { 3288 if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { 3289 u8 min, max; 3290 3291 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); 3292 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); 3293 DP_NOTICE(p_hwfn, 3294 "rss_eng id [%d] is not valid, available indices [%d - %d]\n", 3295 src_id, min, max); 3296 3297 return -EINVAL; 3298 } 3299 3300 *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; 3301 3302 return 0; 3303 } 3304 3305 static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low, 3306 u8 *p_filter) 3307 { 3308 *p_high = p_filter[1] | (p_filter[0] << 8); 3309 *p_low = p_filter[5] | (p_filter[4] << 8) | 3310 (p_filter[3] << 16) | (p_filter[2] << 24); 3311 } 3312 3313 int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, 3314 struct qed_ptt *p_ptt, u8 *p_filter) 3315 { 3316 u32 high = 0, low = 0, en; 3317 int i; 3318 3319 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3320 return 0; 3321 3322 qed_llh_mac_to_filter(&high, &low, p_filter); 3323 3324 /* Find a free entry and utilize it */ 3325 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 3326 en = qed_rd(p_hwfn, p_ptt, 3327 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); 3328 if (en) 3329 continue; 3330 qed_wr(p_hwfn, p_ptt, 3331 NIG_REG_LLH_FUNC_FILTER_VALUE + 3332 2 * i * sizeof(u32), low); 3333 qed_wr(p_hwfn, p_ptt, 3334 NIG_REG_LLH_FUNC_FILTER_VALUE + 3335 (2 * i + 1) * sizeof(u32), high); 3336 qed_wr(p_hwfn, p_ptt, 3337 NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); 3338 qed_wr(p_hwfn, p_ptt, 3339 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 3340 i * sizeof(u32), 0); 3341 qed_wr(p_hwfn, p_ptt, 3342 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); 3343 break; 3344 } 3345 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { 3346 DP_NOTICE(p_hwfn, 3347 "Failed to find an empty LLH filter to utilize\n"); 3348 return -EINVAL; 3349 } 3350 3351 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3352 "mac: %pM is added at %d\n", 3353 p_filter, i); 3354 3355 return 0; 3356 } 3357 3358 void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, 3359 struct qed_ptt *p_ptt, u8 *p_filter) 3360 { 3361 u32 high = 0, low = 0; 3362 int i; 3363 3364 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3365 return; 3366 3367 qed_llh_mac_to_filter(&high, &low, p_filter); 3368 3369 /* Find the entry and clean it */ 3370 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 3371 if (qed_rd(p_hwfn, p_ptt, 3372 NIG_REG_LLH_FUNC_FILTER_VALUE + 3373 2 * i * sizeof(u32)) != low) 3374 continue; 3375 if (qed_rd(p_hwfn, p_ptt, 3376 NIG_REG_LLH_FUNC_FILTER_VALUE + 3377 (2 * i + 1) * sizeof(u32)) != high) 3378 
continue; 3379 3380 qed_wr(p_hwfn, p_ptt, 3381 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); 3382 qed_wr(p_hwfn, p_ptt, 3383 NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); 3384 qed_wr(p_hwfn, p_ptt, 3385 NIG_REG_LLH_FUNC_FILTER_VALUE + 3386 (2 * i + 1) * sizeof(u32), 0); 3387 3388 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3389 "mac: %pM is removed from %d\n", 3390 p_filter, i); 3391 break; 3392 } 3393 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 3394 DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); 3395 } 3396 3397 int 3398 qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, 3399 struct qed_ptt *p_ptt, 3400 u16 source_port_or_eth_type, 3401 u16 dest_port, enum qed_llh_port_filter_type_t type) 3402 { 3403 u32 high = 0, low = 0, en; 3404 int i; 3405 3406 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3407 return 0; 3408 3409 switch (type) { 3410 case QED_LLH_FILTER_ETHERTYPE: 3411 high = source_port_or_eth_type; 3412 break; 3413 case QED_LLH_FILTER_TCP_SRC_PORT: 3414 case QED_LLH_FILTER_UDP_SRC_PORT: 3415 low = source_port_or_eth_type << 16; 3416 break; 3417 case QED_LLH_FILTER_TCP_DEST_PORT: 3418 case QED_LLH_FILTER_UDP_DEST_PORT: 3419 low = dest_port; 3420 break; 3421 case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 3422 case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 3423 low = (source_port_or_eth_type << 16) | dest_port; 3424 break; 3425 default: 3426 DP_NOTICE(p_hwfn, 3427 "Non valid LLH protocol filter type %d\n", type); 3428 return -EINVAL; 3429 } 3430 /* Find a free entry and utilize it */ 3431 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 3432 en = qed_rd(p_hwfn, p_ptt, 3433 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); 3434 if (en) 3435 continue; 3436 qed_wr(p_hwfn, p_ptt, 3437 NIG_REG_LLH_FUNC_FILTER_VALUE + 3438 2 * i * sizeof(u32), low); 3439 qed_wr(p_hwfn, p_ptt, 3440 NIG_REG_LLH_FUNC_FILTER_VALUE + 3441 (2 * i + 1) * sizeof(u32), high); 3442 qed_wr(p_hwfn, p_ptt, 3443 NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1); 3444 qed_wr(p_hwfn, p_ptt, 3445 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 3446 i * sizeof(u32), 1 << type); 3447 qed_wr(p_hwfn, p_ptt, 3448 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); 3449 break; 3450 } 3451 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { 3452 DP_NOTICE(p_hwfn, 3453 "Failed to find an empty LLH filter to utilize\n"); 3454 return -EINVAL; 3455 } 3456 switch (type) { 3457 case QED_LLH_FILTER_ETHERTYPE: 3458 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3459 "ETH type %x is added at %d\n", 3460 source_port_or_eth_type, i); 3461 break; 3462 case QED_LLH_FILTER_TCP_SRC_PORT: 3463 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3464 "TCP src port %x is added at %d\n", 3465 source_port_or_eth_type, i); 3466 break; 3467 case QED_LLH_FILTER_UDP_SRC_PORT: 3468 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3469 "UDP src port %x is added at %d\n", 3470 source_port_or_eth_type, i); 3471 break; 3472 case QED_LLH_FILTER_TCP_DEST_PORT: 3473 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3474 "TCP dst port %x is added at %d\n", dest_port, i); 3475 break; 3476 case QED_LLH_FILTER_UDP_DEST_PORT: 3477 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3478 "UDP dst port %x is added at %d\n", dest_port, i); 3479 break; 3480 case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 3481 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3482 "TCP src/dst ports %x/%x are added at %d\n", 3483 source_port_or_eth_type, dest_port, i); 3484 break; 3485 case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 3486 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 3487 "UDP src/dst ports %x/%x are added at %d\n", 3488 source_port_or_eth_type, dest_port, i); 3489 break; 3490 } 3491 return 
0; 3492 } 3493 3494 void 3495 qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, 3496 struct qed_ptt *p_ptt, 3497 u16 source_port_or_eth_type, 3498 u16 dest_port, 3499 enum qed_llh_port_filter_type_t type) 3500 { 3501 u32 high = 0, low = 0; 3502 int i; 3503 3504 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3505 return; 3506 3507 switch (type) { 3508 case QED_LLH_FILTER_ETHERTYPE: 3509 high = source_port_or_eth_type; 3510 break; 3511 case QED_LLH_FILTER_TCP_SRC_PORT: 3512 case QED_LLH_FILTER_UDP_SRC_PORT: 3513 low = source_port_or_eth_type << 16; 3514 break; 3515 case QED_LLH_FILTER_TCP_DEST_PORT: 3516 case QED_LLH_FILTER_UDP_DEST_PORT: 3517 low = dest_port; 3518 break; 3519 case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: 3520 case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: 3521 low = (source_port_or_eth_type << 16) | dest_port; 3522 break; 3523 default: 3524 DP_NOTICE(p_hwfn, 3525 "Non valid LLH protocol filter type %d\n", type); 3526 return; 3527 } 3528 3529 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { 3530 if (!qed_rd(p_hwfn, p_ptt, 3531 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32))) 3532 continue; 3533 if (!qed_rd(p_hwfn, p_ptt, 3534 NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32))) 3535 continue; 3536 if (!(qed_rd(p_hwfn, p_ptt, 3537 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 3538 i * sizeof(u32)) & BIT(type))) 3539 continue; 3540 if (qed_rd(p_hwfn, p_ptt, 3541 NIG_REG_LLH_FUNC_FILTER_VALUE + 3542 2 * i * sizeof(u32)) != low) 3543 continue; 3544 if (qed_rd(p_hwfn, p_ptt, 3545 NIG_REG_LLH_FUNC_FILTER_VALUE + 3546 (2 * i + 1) * sizeof(u32)) != high) 3547 continue; 3548 3549 qed_wr(p_hwfn, p_ptt, 3550 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); 3551 qed_wr(p_hwfn, p_ptt, 3552 NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); 3553 qed_wr(p_hwfn, p_ptt, 3554 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + 3555 i * sizeof(u32), 0); 3556 qed_wr(p_hwfn, p_ptt, 3557 NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); 3558 qed_wr(p_hwfn, p_ptt, 3559 NIG_REG_LLH_FUNC_FILTER_VALUE + 3560 (2 * i + 1) * sizeof(u32), 0); 3561 break; 3562 } 3563 3564 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) 3565 DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); 3566 } 3567 3568 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3569 u32 hw_addr, void *p_eth_qzone, 3570 size_t eth_qzone_size, u8 timeset) 3571 { 3572 struct coalescing_timeset *p_coal_timeset; 3573 3574 if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { 3575 DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n"); 3576 return -EINVAL; 3577 } 3578 3579 p_coal_timeset = p_eth_qzone; 3580 memset(p_coal_timeset, 0, eth_qzone_size); 3581 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); 3582 SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); 3583 qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); 3584 3585 return 0; 3586 } 3587 3588 int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3589 u16 coalesce, u8 qid, u16 sb_id) 3590 { 3591 struct ustorm_eth_queue_zone eth_qzone; 3592 u8 timeset, timer_res; 3593 u16 fw_qid = 0; 3594 u32 address; 3595 int rc; 3596 3597 /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ 3598 if (coalesce <= 0x7F) { 3599 timer_res = 0; 3600 } else if (coalesce <= 0xFF) { 3601 timer_res = 1; 3602 } else if (coalesce <= 0x1FF) { 3603 timer_res = 2; 3604 } else { 3605 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); 3606 return -EINVAL; 3607 } 3608 timeset = (u8)(coalesce 
>> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
	return rc;
}

int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
	return rc;
}

/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
					    min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

/* This function performs several validations for WFQ
 * configuration and required min rate for a given vport
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
 *    rates to get less than one percent of min_pf_rate.
 * 3.
total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 3729 */ 3730 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, 3731 u16 vport_id, u32 req_rate, u32 min_pf_rate) 3732 { 3733 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; 3734 int non_requested_count = 0, req_count = 0, i, num_vports; 3735 3736 num_vports = p_hwfn->qm_info.num_vports; 3737 3738 /* Accounting for the vports which are configured for WFQ explicitly */ 3739 for (i = 0; i < num_vports; i++) { 3740 u32 tmp_speed; 3741 3742 if ((i != vport_id) && 3743 p_hwfn->qm_info.wfq_data[i].configured) { 3744 req_count++; 3745 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; 3746 total_req_min_rate += tmp_speed; 3747 } 3748 } 3749 3750 /* Include current vport data as well */ 3751 req_count++; 3752 total_req_min_rate += req_rate; 3753 non_requested_count = num_vports - req_count; 3754 3755 if (req_rate < min_pf_rate / QED_WFQ_UNIT) { 3756 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3757 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 3758 vport_id, req_rate, min_pf_rate); 3759 return -EINVAL; 3760 } 3761 3762 if (num_vports > QED_WFQ_UNIT) { 3763 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3764 "Number of vports is greater than %d\n", 3765 QED_WFQ_UNIT); 3766 return -EINVAL; 3767 } 3768 3769 if (total_req_min_rate > min_pf_rate) { 3770 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3771 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", 3772 total_req_min_rate, min_pf_rate); 3773 return -EINVAL; 3774 } 3775 3776 total_left_rate = min_pf_rate - total_req_min_rate; 3777 3778 left_rate_per_vp = total_left_rate / non_requested_count; 3779 if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { 3780 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, 3781 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", 3782 left_rate_per_vp, min_pf_rate); 3783 return -EINVAL; 3784 } 3785 3786 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; 3787 p_hwfn->qm_info.wfq_data[vport_id].configured = true; 3788 3789 for (i = 0; i < num_vports; i++) { 3790 if (p_hwfn->qm_info.wfq_data[i].configured) 3791 continue; 3792 3793 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; 3794 } 3795 3796 return 0; 3797 } 3798 3799 static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, 3800 struct qed_ptt *p_ptt, u16 vp_id, u32 rate) 3801 { 3802 struct qed_mcp_link_state *p_link; 3803 int rc = 0; 3804 3805 p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; 3806 3807 if (!p_link->min_pf_rate) { 3808 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; 3809 p_hwfn->qm_info.wfq_data[vp_id].configured = true; 3810 return rc; 3811 } 3812 3813 rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); 3814 3815 if (!rc) 3816 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, 3817 p_link->min_pf_rate); 3818 else 3819 DP_NOTICE(p_hwfn, 3820 "Validation failed while configuring min rate\n"); 3821 3822 return rc; 3823 } 3824 3825 static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, 3826 struct qed_ptt *p_ptt, 3827 u32 min_pf_rate) 3828 { 3829 bool use_wfq = false; 3830 int rc = 0; 3831 u16 i; 3832 3833 /* Validate all pre configured vports for wfq */ 3834 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { 3835 u32 rate; 3836 3837 if (!p_hwfn->qm_info.wfq_data[i].configured) 3838 continue; 3839 3840 rate = p_hwfn->qm_info.wfq_data[i].min_speed; 3841 use_wfq = true; 3842 3843 rc = 
qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); 3844 if (rc) { 3845 DP_NOTICE(p_hwfn, 3846 "WFQ validation failed while configuring min rate\n"); 3847 break; 3848 } 3849 } 3850 3851 if (!rc && use_wfq) 3852 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 3853 else 3854 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); 3855 3856 return rc; 3857 } 3858 3859 /* Main API for qed clients to configure vport min rate. 3860 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] 3861 * rate - Speed in Mbps needs to be assigned to a given vport. 3862 */ 3863 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) 3864 { 3865 int i, rc = -EINVAL; 3866 3867 /* Currently not supported; Might change in future */ 3868 if (cdev->num_hwfns > 1) { 3869 DP_NOTICE(cdev, 3870 "WFQ configuration is not supported for this device\n"); 3871 return rc; 3872 } 3873 3874 for_each_hwfn(cdev, i) { 3875 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3876 struct qed_ptt *p_ptt; 3877 3878 p_ptt = qed_ptt_acquire(p_hwfn); 3879 if (!p_ptt) 3880 return -EBUSY; 3881 3882 rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); 3883 3884 if (rc) { 3885 qed_ptt_release(p_hwfn, p_ptt); 3886 return rc; 3887 } 3888 3889 qed_ptt_release(p_hwfn, p_ptt); 3890 } 3891 3892 return rc; 3893 } 3894 3895 /* API to configure WFQ from mcp link change */ 3896 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, 3897 struct qed_ptt *p_ptt, u32 min_pf_rate) 3898 { 3899 int i; 3900 3901 if (cdev->num_hwfns > 1) { 3902 DP_VERBOSE(cdev, 3903 NETIF_MSG_LINK, 3904 "WFQ configuration is not supported for this device\n"); 3905 return; 3906 } 3907 3908 for_each_hwfn(cdev, i) { 3909 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 3910 3911 __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, 3912 min_pf_rate); 3913 } 3914 } 3915 3916 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, 3917 struct qed_ptt *p_ptt, 3918 struct qed_mcp_link_state *p_link, 3919 u8 max_bw) 3920 { 3921 int rc = 0; 3922 3923 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; 3924 3925 if (!p_link->line_speed && (max_bw != 100)) 3926 return rc; 3927 3928 p_link->speed = (p_link->line_speed * max_bw) / 100; 3929 p_hwfn->qm_info.pf_rl = p_link->speed; 3930 3931 /* Since the limiter also affects Tx-switched traffic, we don't want it 3932 * to limit such traffic in case there's no actual limit. 3933 * In that case, set limit to imaginary high boundary. 
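* For example (made-up numbers): line_speed 25000 Mbps with max_bw 40 yields pf_rl 10000 Mbps, whereas max_bw 100 takes the 100000 Mbps pseudo-unlimited value set just below.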
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}

int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}

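/* Illustrative mapping for the engine/port helpers below (the numbers are
 * assumptions for the example, not read from hardware): a BB-based device
 * reports two engines; assuming num_ports_in_engines is 2, that gives
 * 2 * 2 = 4 ports, and a PF whose abs_pf_id is 5 maps to port 5 % 4 = 1.
 * In CMT mode (more than one hwfn) the port count is always 1, so every PF
 * maps to port 0.
 */
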
int qed_device_num_engines(struct qed_dev *cdev)
{
	return QED_IS_BB(cdev) ? 2 : 1;
}

static int qed_device_num_ports(struct qed_dev *cdev)
{
	/* in CMT always only one port */
	if (cdev->num_hwfns > 1)
		return 1;

	return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
}

int qed_device_get_port_id(struct qed_dev *cdev)
{
	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
}

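/* Usage sketch for the bandwidth APIs above (hypothetical caller, for
 * illustration only; the percentages and rates are arbitrary values, not a
 * recommended configuration):
 *
 *	rc = qed_configure_pf_max_bandwidth(cdev, 50);	// cap PF at 50%
 *	if (!rc)
 *		rc = qed_configure_pf_min_bandwidth(cdev, 20);	// guarantee 20%
 *	if (!rc)
 *		rc = qed_configure_vport_wfq(cdev, 0, 2500);	// 2500 Mbps for vport 0
 */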