1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2017 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/pci.h> 13 #include <linux/netdevice.h> 14 #include <linux/if_vlan.h> 15 #include <linux/interrupt.h> 16 #include <linux/etherdevice.h> 17 #include "bnxt_hsi.h" 18 #include "bnxt.h" 19 #include "bnxt_ulp.h" 20 #include "bnxt_sriov.h" 21 #include "bnxt_vfr.h" 22 #include "bnxt_ethtool.h" 23 24 #ifdef CONFIG_BNXT_SRIOV 25 static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, 26 struct bnxt_vf_info *vf, u16 event_id) 27 { 28 struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; 29 struct hwrm_fwd_async_event_cmpl_input req = {0}; 30 struct hwrm_async_event_cmpl *async_cmpl; 31 int rc = 0; 32 33 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); 34 if (vf) 35 req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); 36 else 37 /* broadcast this async event to all VFs */ 38 req.encap_async_event_target_id = cpu_to_le16(0xffff); 39 async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; 40 async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); 41 async_cmpl->event_id = cpu_to_le16(event_id); 42 43 mutex_lock(&bp->hwrm_cmd_lock); 44 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 45 46 if (rc) { 47 netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", 48 rc); 49 goto fwd_async_event_cmpl_exit; 50 } 51 52 if (resp->error_code) { 53 netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", 54 resp->error_code); 55 rc = -1; 56 } 57 58 fwd_async_event_cmpl_exit: 59 mutex_unlock(&bp->hwrm_cmd_lock); 60 return rc; 61 } 62 63 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) 64 { 65 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 66 netdev_err(bp->dev, "vf ndo called though PF is down\n"); 67 return -EINVAL; 68 } 69 if (!bp->pf.active_vfs) { 70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 71 return -EINVAL; 72 } 73 if (vf_id >= bp->pf.active_vfs) { 74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 75 return -EINVAL; 76 } 77 return 0; 78 } 79 80 int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) 81 { 82 struct hwrm_func_cfg_input req = {0}; 83 struct bnxt *bp = netdev_priv(dev); 84 struct bnxt_vf_info *vf; 85 bool old_setting = false; 86 u32 func_flags; 87 int rc; 88 89 if (bp->hwrm_spec_code < 0x10701) 90 return -ENOTSUPP; 91 92 rc = bnxt_vf_ndo_prep(bp, vf_id); 93 if (rc) 94 return rc; 95 96 vf = &bp->pf.vf[vf_id]; 97 if (vf->flags & BNXT_VF_SPOOFCHK) 98 old_setting = true; 99 if (old_setting == setting) 100 return 0; 101 102 func_flags = vf->func_flags; 103 if (setting) 104 func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; 105 else 106 func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; 107 /*TODO: if the driver supports VLAN filter on guest VLAN, 108 * the spoof check should also include vlan anti-spoofing 109 */ 110 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 111 req.fid = cpu_to_le16(vf->fw_fid); 112 req.flags = cpu_to_le32(func_flags); 113 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 114 if (!rc) { 115 vf->func_flags = func_flags; 116 if (setting) 117 vf->flags |= BNXT_VF_SPOOFCHK; 118 else 119 vf->flags &= ~BNXT_VF_SPOOFCHK; 120 } 121 return rc; 122 } 123 124 int bnxt_get_vf_config(struct net_device *dev, int vf_id, 125 struct ifla_vf_info *ivi) 126 { 127 struct bnxt *bp = netdev_priv(dev); 128 struct bnxt_vf_info *vf; 129 int rc; 130 131 rc = bnxt_vf_ndo_prep(bp, vf_id); 132 if (rc) 133 return rc; 134 135 ivi->vf = vf_id; 136 vf = &bp->pf.vf[vf_id]; 137 138 if (is_valid_ether_addr(vf->mac_addr)) 139 memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); 140 else 141 memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN); 142 ivi->max_tx_rate = vf->max_tx_rate; 143 ivi->min_tx_rate = vf->min_tx_rate; 144 ivi->vlan = vf->vlan; 145 if (vf->flags & BNXT_VF_QOS) 146 ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT; 147 else 148 ivi->qos = 0; 149 ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); 150 if (!(vf->flags & BNXT_VF_LINK_FORCED)) 151 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 152 else if (vf->flags & BNXT_VF_LINK_UP) 153 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 154 else 155 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 156 157 return 0; 158 } 159 160 int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) 161 { 162 struct hwrm_func_cfg_input req = {0}; 163 struct bnxt *bp = netdev_priv(dev); 164 struct bnxt_vf_info *vf; 165 int rc; 166 167 rc = bnxt_vf_ndo_prep(bp, vf_id); 168 if (rc) 169 return rc; 170 /* reject bc or mc mac addr, zero mac addr means allow 171 * VF to use its own mac addr 172 */ 173 if (is_multicast_ether_addr(mac)) { 174 netdev_err(dev, "Invalid VF ethernet address\n"); 175 return -EINVAL; 176 } 177 vf = &bp->pf.vf[vf_id]; 178 179 memcpy(vf->mac_addr, mac, ETH_ALEN); 180 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 181 req.fid = cpu_to_le16(vf->fw_fid); 182 req.flags = cpu_to_le32(vf->func_flags); 183 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 184 memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 185 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 186 } 187 188 int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, 189 __be16 vlan_proto) 190 { 191 struct hwrm_func_cfg_input req = {0}; 192 struct bnxt *bp = netdev_priv(dev); 193 struct bnxt_vf_info *vf; 194 u16 vlan_tag; 195 int rc; 196 197 if (bp->hwrm_spec_code < 0x10201) 198 return -ENOTSUPP; 199 200 if (vlan_proto != htons(ETH_P_8021Q)) 201 return -EPROTONOSUPPORT; 202 203 rc = bnxt_vf_ndo_prep(bp, vf_id); 204 if (rc) 205 return rc; 206 207 /* TODO: needed to implement proper handling of user priority, 208 * currently fail the command if there is valid priority 209 */ 210 if (vlan_id > 4095 || qos) 211 return -EINVAL; 212 213 vf = &bp->pf.vf[vf_id]; 214 vlan_tag = vlan_id; 215 if (vlan_tag == vf->vlan) 216 return 0; 217 218 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 219 req.fid = cpu_to_le16(vf->fw_fid); 220 req.flags = cpu_to_le32(vf->func_flags); 221 req.dflt_vlan = cpu_to_le16(vlan_tag); 222 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); 223 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 224 if (!rc) 225 vf->vlan = vlan_tag; 226 return rc; 227 } 228 229 int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, 230 int max_tx_rate) 231 { 232 struct hwrm_func_cfg_input req = {0}; 233 struct bnxt *bp = netdev_priv(dev); 234 struct bnxt_vf_info *vf; 235 u32 pf_link_speed; 236 int rc; 237 238 rc = bnxt_vf_ndo_prep(bp, vf_id); 239 if (rc) 240 return rc; 241 242 vf = &bp->pf.vf[vf_id]; 243 pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 244 if (max_tx_rate > pf_link_speed) { 245 netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", 246 max_tx_rate, vf_id); 247 return -EINVAL; 248 } 249 250 if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { 251 netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", 252 min_tx_rate, vf_id); 253 return -EINVAL; 254 } 255 if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) 256 return 0; 257 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 258 req.fid = cpu_to_le16(vf->fw_fid); 259 req.flags = cpu_to_le32(vf->func_flags); 260 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); 261 req.max_bw = cpu_to_le32(max_tx_rate); 262 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); 263 req.min_bw = cpu_to_le32(min_tx_rate); 264 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 265 if (!rc) { 266 vf->min_tx_rate = min_tx_rate; 267 vf->max_tx_rate = max_tx_rate; 268 } 269 return rc; 270 } 271 272 int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) 273 { 274 struct bnxt *bp = netdev_priv(dev); 275 struct bnxt_vf_info *vf; 276 int rc; 277 278 rc = bnxt_vf_ndo_prep(bp, vf_id); 279 if (rc) 280 return rc; 281 282 vf = &bp->pf.vf[vf_id]; 283 284 vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); 285 switch (link) { 286 case IFLA_VF_LINK_STATE_AUTO: 287 vf->flags |= BNXT_VF_LINK_UP; 288 break; 289 case IFLA_VF_LINK_STATE_DISABLE: 290 vf->flags |= BNXT_VF_LINK_FORCED; 291 break; 292 case IFLA_VF_LINK_STATE_ENABLE: 293 vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; 294 break; 295 default: 296 netdev_err(bp->dev, "Invalid link option\n"); 297 rc = -EINVAL; 298 break; 299 } 300 if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED)) 301 rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf, 302 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE); 303 return rc; 304 } 305 306 static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) 307 { 308 int i; 309 struct bnxt_vf_info *vf; 310 311 for (i = 0; i < num_vfs; i++) { 312 vf = &bp->pf.vf[i]; 313 memset(vf, 0, sizeof(*vf)); 314 } 315 return 0; 316 } 317 318 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) 319 { 320 int i, rc = 0; 321 struct bnxt_pf_info *pf = &bp->pf; 322 struct hwrm_func_vf_resc_free_input req = {0}; 323 324 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); 325 326 mutex_lock(&bp->hwrm_cmd_lock); 327 for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { 328 req.vf_id = cpu_to_le16(i); 329 rc = _hwrm_send_message(bp, &req, sizeof(req), 330 HWRM_CMD_TIMEOUT); 331 if (rc) 332 break; 333 } 334 mutex_unlock(&bp->hwrm_cmd_lock); 335 return rc; 336 } 337 338 static void bnxt_free_vf_resources(struct bnxt *bp) 339 { 340 struct pci_dev *pdev = bp->pdev; 341 int i; 342 343 kfree(bp->pf.vf_event_bmap); 344 bp->pf.vf_event_bmap = NULL; 345 346 for (i = 0; i < 4; i++) { 347 if (bp->pf.hwrm_cmd_req_addr[i]) { 348 dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, 349 bp->pf.hwrm_cmd_req_addr[i], 350 bp->pf.hwrm_cmd_req_dma_addr[i]); 351 bp->pf.hwrm_cmd_req_addr[i] = NULL; 352 } 353 } 354 355 kfree(bp->pf.vf); 356 bp->pf.vf = NULL; 357 } 358 359 static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) 360 { 361 struct pci_dev *pdev = bp->pdev; 362 u32 nr_pages, size, i, j, k = 0; 363 364 bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); 365 if (!bp->pf.vf) 366 return -ENOMEM; 367 368 bnxt_set_vf_attr(bp, num_vfs); 369 370 size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; 371 nr_pages = size / BNXT_PAGE_SIZE; 372 if (size & (BNXT_PAGE_SIZE - 1)) 373 nr_pages++; 374 375 for (i = 0; i < nr_pages; i++) { 376 bp->pf.hwrm_cmd_req_addr[i] = 377 dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, 378 &bp->pf.hwrm_cmd_req_dma_addr[i], 379 GFP_KERNEL); 380 381 if (!bp->pf.hwrm_cmd_req_addr[i]) 382 return -ENOMEM; 383 384 for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { 385 struct bnxt_vf_info *vf = &bp->pf.vf[k]; 386 387 vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + 388 j * BNXT_HWRM_REQ_MAX_SIZE; 389 vf->hwrm_cmd_req_dma_addr = 390 bp->pf.hwrm_cmd_req_dma_addr[i] + j * 391 BNXT_HWRM_REQ_MAX_SIZE; 392 k++; 393 } 394 } 395 396 /* Max 128 VF's */ 397 bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); 398 if (!bp->pf.vf_event_bmap) 399 return -ENOMEM; 400 401 bp->pf.hwrm_cmd_req_pages = nr_pages; 402 return 0; 403 } 404 405 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) 406 { 407 struct hwrm_func_buf_rgtr_input req = {0}; 408 409 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); 410 411 req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); 412 req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); 413 req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); 414 req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); 415 req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); 416 req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); 417 req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); 418 419 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 420 } 421 422 /* Only called by PF to reserve resources for VFs, returns actual number of 423 * VFs configured, or < 0 on error. 424 */ 425 static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) 426 { 427 struct hwrm_func_vf_resource_cfg_input req = {0}; 428 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 429 u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; 430 u16 vf_stat_ctx, vf_vnics, vf_ring_grps; 431 struct bnxt_pf_info *pf = &bp->pf; 432 int i, rc = 0; 433 434 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); 435 436 vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; 437 vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 438 if (bp->flags & BNXT_FLAG_AGG_RINGS) 439 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; 440 else 441 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; 442 vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; 443 vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; 444 vf_vnics = hw_resc->max_vnics - bp->nr_vnics; 445 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 446 447 req.min_rsscos_ctx = cpu_to_le16(1); 448 req.max_rsscos_ctx = cpu_to_le16(1); 449 if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { 450 req.min_cmpl_rings = cpu_to_le16(1); 451 req.min_tx_rings = cpu_to_le16(1); 452 req.min_rx_rings = cpu_to_le16(1); 453 req.min_l2_ctxs = cpu_to_le16(1); 454 req.min_vnics = cpu_to_le16(1); 455 req.min_stat_ctx = cpu_to_le16(1); 456 req.min_hw_ring_grps = cpu_to_le16(1); 457 } else { 458 vf_cp_rings /= num_vfs; 459 vf_tx_rings /= num_vfs; 460 vf_rx_rings /= num_vfs; 461 vf_vnics /= num_vfs; 462 vf_stat_ctx /= num_vfs; 463 vf_ring_grps /= num_vfs; 464 465 req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); 466 req.min_tx_rings = cpu_to_le16(vf_tx_rings); 467 req.min_rx_rings = cpu_to_le16(vf_rx_rings); 468 req.min_l2_ctxs = cpu_to_le16(4); 469 req.min_vnics = cpu_to_le16(vf_vnics); 470 req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); 471 req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); 472 } 473 req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); 474 req.max_tx_rings = cpu_to_le16(vf_tx_rings); 475 req.max_rx_rings = cpu_to_le16(vf_rx_rings); 476 req.max_l2_ctxs = cpu_to_le16(4); 477 req.max_vnics = cpu_to_le16(vf_vnics); 478 req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); 479 req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); 480 481 mutex_lock(&bp->hwrm_cmd_lock); 482 for (i = 0; i < num_vfs; i++) { 483 req.vf_id = cpu_to_le16(pf->first_vf_id + i); 484 rc = _hwrm_send_message(bp, &req, sizeof(req), 485 HWRM_CMD_TIMEOUT); 486 if (rc) { 487 rc = -ENOMEM; 488 break; 489 } 490 pf->active_vfs = i + 1; 491 pf->vf[i].fw_fid = pf->first_vf_id + i; 492 } 493 mutex_unlock(&bp->hwrm_cmd_lock); 494 if (pf->active_vfs) { 495 u16 n = 1; 496 497 if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL) 498 n = pf->active_vfs; 499 500 hw_resc->max_tx_rings -= vf_tx_rings * n; 501 hw_resc->max_rx_rings -= vf_rx_rings * n; 502 hw_resc->max_hw_ring_grps -= vf_ring_grps * n; 503 hw_resc->max_cp_rings -= vf_cp_rings * n; 504 hw_resc->max_rsscos_ctxs -= pf->active_vfs; 505 hw_resc->max_stat_ctxs -= vf_stat_ctx * n; 506 hw_resc->max_vnics -= vf_vnics * n; 507 508 rc = pf->active_vfs; 509 } 510 return rc; 511 } 512 513 /* Only called by PF to reserve resources for VFs, returns actual number of 514 * VFs configured, or < 0 on error. 515 */ 516 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) 517 { 518 u32 rc = 0, mtu, i; 519 u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; 520 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 521 u16 vf_ring_grps, max_stat_ctxs; 522 struct hwrm_func_cfg_input req = {0}; 523 struct bnxt_pf_info *pf = &bp->pf; 524 int total_vf_tx_rings = 0; 525 526 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 527 528 max_stat_ctxs = hw_resc->max_stat_ctxs; 529 530 /* Remaining rings are distributed equally amongs VF's for now */ 531 vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; 532 vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; 533 if (bp->flags & BNXT_FLAG_AGG_RINGS) 534 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / 535 num_vfs; 536 else 537 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) / 538 num_vfs; 539 vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; 540 vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs; 541 vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; 542 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 543 544 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | 545 FUNC_CFG_REQ_ENABLES_MRU | 546 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | 547 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | 548 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 549 FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | 550 FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | 551 FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | 552 FUNC_CFG_REQ_ENABLES_NUM_VNICS | 553 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); 554 555 mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 556 req.mru = cpu_to_le16(mtu); 557 req.mtu = cpu_to_le16(mtu); 558 559 req.num_rsscos_ctxs = cpu_to_le16(1); 560 req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); 561 req.num_tx_rings = cpu_to_le16(vf_tx_rings); 562 req.num_rx_rings = cpu_to_le16(vf_rx_rings); 563 req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); 564 req.num_l2_ctxs = cpu_to_le16(4); 565 566 req.num_vnics = cpu_to_le16(vf_vnics); 567 /* FIXME spec currently uses 1 bit for stats ctx */ 568 req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); 569 570 mutex_lock(&bp->hwrm_cmd_lock); 571 for (i = 0; i < num_vfs; i++) { 572 int vf_tx_rsvd = vf_tx_rings; 573 574 req.fid = cpu_to_le16(pf->first_vf_id + i); 575 rc = _hwrm_send_message(bp, &req, sizeof(req), 576 HWRM_CMD_TIMEOUT); 577 if (rc) 578 break; 579 pf->active_vfs = i + 1; 580 pf->vf[i].fw_fid = le16_to_cpu(req.fid); 581 rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, 582 &vf_tx_rsvd); 583 if (rc) 584 break; 585 total_vf_tx_rings += vf_tx_rsvd; 586 } 587 mutex_unlock(&bp->hwrm_cmd_lock); 588 if (rc) 589 rc = -ENOMEM; 590 if (pf->active_vfs) { 591 hw_resc->max_tx_rings -= total_vf_tx_rings; 592 hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; 593 hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs; 594 hw_resc->max_cp_rings -= vf_cp_rings * num_vfs; 595 hw_resc->max_rsscos_ctxs -= num_vfs; 596 hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs; 597 hw_resc->max_vnics -= vf_vnics * num_vfs; 598 rc = pf->active_vfs; 599 } 600 return rc; 601 } 602 603 static int bnxt_func_cfg(struct bnxt *bp, int num_vfs) 604 { 605 if (bp->flags & BNXT_FLAG_NEW_RM) 606 return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs); 607 else 608 return bnxt_hwrm_func_cfg(bp, num_vfs); 609 } 610 611 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) 612 { 613 int rc = 0, vfs_supported; 614 int min_rx_rings, min_tx_rings, min_rss_ctxs; 615 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 616 int tx_ok = 0, rx_ok = 0, rss_ok = 0; 617 int avail_cp, avail_stat; 618 619 /* Check if we can enable requested num of vf's. At a mininum 620 * we require 1 RX 1 TX rings for each VF. In this minimum conf 621 * features like TPA will not be available. 622 */ 623 vfs_supported = *num_vfs; 624 625 avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; 626 avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 627 avail_cp = min_t(int, avail_cp, avail_stat); 628 629 while (vfs_supported) { 630 min_rx_rings = vfs_supported; 631 min_tx_rings = vfs_supported; 632 min_rss_ctxs = vfs_supported; 633 634 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 635 if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >= 636 min_rx_rings) 637 rx_ok = 1; 638 } else { 639 if (hw_resc->max_rx_rings - bp->rx_nr_rings >= 640 min_rx_rings) 641 rx_ok = 1; 642 } 643 if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings || 644 avail_cp < min_rx_rings) 645 rx_ok = 0; 646 647 if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings && 648 avail_cp >= min_tx_rings) 649 tx_ok = 1; 650 651 if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >= 652 min_rss_ctxs) 653 rss_ok = 1; 654 655 if (tx_ok && rx_ok && rss_ok) 656 break; 657 658 vfs_supported--; 659 } 660 661 if (!vfs_supported) { 662 netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n"); 663 return -EINVAL; 664 } 665 666 if (vfs_supported != *num_vfs) { 667 netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", 668 *num_vfs, vfs_supported); 669 *num_vfs = vfs_supported; 670 } 671 672 rc = bnxt_alloc_vf_resources(bp, *num_vfs); 673 if (rc) 674 goto err_out1; 675 676 /* Reserve resources for VFs */ 677 rc = bnxt_func_cfg(bp, *num_vfs); 678 if (rc != *num_vfs) { 679 if (rc <= 0) { 680 netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); 681 *num_vfs = 0; 682 goto err_out2; 683 } 684 netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc); 685 *num_vfs = rc; 686 } 687 688 /* Register buffers for VFs */ 689 rc = bnxt_hwrm_func_buf_rgtr(bp); 690 if (rc) 691 goto err_out2; 692 693 bnxt_ulp_sriov_cfg(bp, *num_vfs); 694 695 rc = pci_enable_sriov(bp->pdev, *num_vfs); 696 if (rc) 697 goto err_out2; 698 699 return 0; 700 701 err_out2: 702 /* Free the resources reserved for various VF's */ 703 bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); 704 705 err_out1: 706 bnxt_free_vf_resources(bp); 707 708 return rc; 709 } 710 711 void bnxt_sriov_disable(struct bnxt *bp) 712 { 713 u16 num_vfs = pci_num_vf(bp->pdev); 714 715 if (!num_vfs) 716 return; 717 718 /* synchronize VF and VF-rep create and destroy */ 719 mutex_lock(&bp->sriov_lock); 720 bnxt_vf_reps_destroy(bp); 721 722 if (pci_vfs_assigned(bp->pdev)) { 723 bnxt_hwrm_fwd_async_event_cmpl( 724 bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); 725 netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", 726 num_vfs); 727 } else { 728 pci_disable_sriov(bp->pdev); 729 /* Free the HW resources reserved for various VF's */ 730 bnxt_hwrm_func_vf_resource_free(bp, num_vfs); 731 } 732 mutex_unlock(&bp->sriov_lock); 733 734 bnxt_free_vf_resources(bp); 735 736 bp->pf.active_vfs = 0; 737 /* Reclaim all resources for the PF. */ 738 rtnl_lock(); 739 bnxt_restore_pf_fw_resources(bp); 740 rtnl_unlock(); 741 742 bnxt_ulp_sriov_cfg(bp, 0); 743 } 744 745 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) 746 { 747 struct net_device *dev = pci_get_drvdata(pdev); 748 struct bnxt *bp = netdev_priv(dev); 749 750 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { 751 netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n"); 752 return 0; 753 } 754 755 rtnl_lock(); 756 if (!netif_running(dev)) { 757 netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); 758 rtnl_unlock(); 759 return 0; 760 } 761 bp->sriov_cfg = true; 762 rtnl_unlock(); 763 764 if (pci_vfs_assigned(bp->pdev)) { 765 netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n"); 766 num_vfs = 0; 767 goto sriov_cfg_exit; 768 } 769 770 /* Check if enabled VFs is same as requested */ 771 if (num_vfs && num_vfs == bp->pf.active_vfs) 772 goto sriov_cfg_exit; 773 774 /* if there are previous existing VFs, clean them up */ 775 bnxt_sriov_disable(bp); 776 if (!num_vfs) 777 goto sriov_cfg_exit; 778 779 bnxt_sriov_enable(bp, &num_vfs); 780 781 sriov_cfg_exit: 782 bp->sriov_cfg = false; 783 wake_up(&bp->sriov_cfg_wait); 784 785 return num_vfs; 786 } 787 788 static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 789 void *encap_resp, __le64 encap_resp_addr, 790 __le16 encap_resp_cpr, u32 msg_size) 791 { 792 int rc = 0; 793 struct hwrm_fwd_resp_input req = {0}; 794 struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 795 796 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); 797 798 /* Set the new target id */ 799 req.target_id = cpu_to_le16(vf->fw_fid); 800 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 801 req.encap_resp_len = cpu_to_le16(msg_size); 802 req.encap_resp_addr = encap_resp_addr; 803 req.encap_resp_cmpl_ring = encap_resp_cpr; 804 memcpy(req.encap_resp, encap_resp, msg_size); 805 806 mutex_lock(&bp->hwrm_cmd_lock); 807 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 808 809 if (rc) { 810 netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); 811 goto fwd_resp_exit; 812 } 813 814 if (resp->error_code) { 815 netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", 816 resp->error_code); 817 rc = -1; 818 } 819 820 fwd_resp_exit: 821 mutex_unlock(&bp->hwrm_cmd_lock); 822 return rc; 823 } 824 825 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 826 u32 msg_size) 827 { 828 int rc = 0; 829 struct hwrm_reject_fwd_resp_input req = {0}; 830 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 831 832 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); 833 /* Set the new target id */ 834 req.target_id = cpu_to_le16(vf->fw_fid); 835 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 836 memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 837 838 mutex_lock(&bp->hwrm_cmd_lock); 839 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 840 841 if (rc) { 842 netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); 843 goto fwd_err_resp_exit; 844 } 845 846 if (resp->error_code) { 847 netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", 848 resp->error_code); 849 rc = -1; 850 } 851 852 fwd_err_resp_exit: 853 mutex_unlock(&bp->hwrm_cmd_lock); 854 return rc; 855 } 856 857 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, 858 u32 msg_size) 859 { 860 int rc = 0; 861 struct hwrm_exec_fwd_resp_input req = {0}; 862 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 863 864 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); 865 /* Set the new target id */ 866 req.target_id = cpu_to_le16(vf->fw_fid); 867 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); 868 memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); 869 870 mutex_lock(&bp->hwrm_cmd_lock); 871 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 872 873 if (rc) { 874 netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); 875 goto exec_fwd_resp_exit; 876 } 877 878 if (resp->error_code) { 879 netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", 880 resp->error_code); 881 rc = -1; 882 } 883 884 exec_fwd_resp_exit: 885 mutex_unlock(&bp->hwrm_cmd_lock); 886 return rc; 887 } 888 889 static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 890 { 891 u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); 892 struct hwrm_func_vf_cfg_input *req = 893 (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; 894 895 /* Only allow VF to set a valid MAC address if the PF assigned MAC 896 * address is zero 897 */ 898 if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { 899 if (is_valid_ether_addr(req->dflt_mac_addr) && 900 !is_valid_ether_addr(vf->mac_addr)) { 901 ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); 902 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 903 } 904 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 905 } 906 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 907 } 908 909 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) 910 { 911 u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); 912 struct hwrm_cfa_l2_filter_alloc_input *req = 913 (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; 914 bool mac_ok = false; 915 916 /* VF MAC address must first match PF MAC address, if it is valid. 917 * Otherwise, it must match the VF MAC address if firmware spec >= 918 * 1.2.2 919 */ 920 if (is_valid_ether_addr(vf->mac_addr)) { 921 if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) 922 mac_ok = true; 923 } else if (is_valid_ether_addr(vf->vf_mac_addr)) { 924 if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr)) 925 mac_ok = true; 926 } else if (bp->hwrm_spec_code < 0x10202) { 927 mac_ok = true; 928 } else { 929 mac_ok = true; 930 } 931 if (mac_ok) 932 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); 933 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); 934 } 935 936 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) 937 { 938 int rc = 0; 939 940 if (!(vf->flags & BNXT_VF_LINK_FORCED)) { 941 /* real link */ 942 rc = bnxt_hwrm_exec_fwd_resp( 943 bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); 944 } else { 945 struct hwrm_port_phy_qcfg_output phy_qcfg_resp; 946 struct hwrm_port_phy_qcfg_input *phy_qcfg_req; 947 948 phy_qcfg_req = 949 (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; 950 mutex_lock(&bp->hwrm_cmd_lock); 951 memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, 952 sizeof(phy_qcfg_resp)); 953 mutex_unlock(&bp->hwrm_cmd_lock); 954 phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; 955 956 if (vf->flags & BNXT_VF_LINK_UP) { 957 /* if physical link is down, force link up on VF */ 958 if (phy_qcfg_resp.link != 959 PORT_PHY_QCFG_RESP_LINK_LINK) { 960 phy_qcfg_resp.link = 961 PORT_PHY_QCFG_RESP_LINK_LINK; 962 phy_qcfg_resp.link_speed = cpu_to_le16( 963 PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); 964 phy_qcfg_resp.duplex_cfg = 965 PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; 966 phy_qcfg_resp.duplex_state = 967 PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; 968 phy_qcfg_resp.pause = 969 (PORT_PHY_QCFG_RESP_PAUSE_TX | 970 PORT_PHY_QCFG_RESP_PAUSE_RX); 971 } 972 } else { 973 /* force link down */ 974 phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; 975 phy_qcfg_resp.link_speed = 0; 976 phy_qcfg_resp.duplex_state = 977 PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; 978 phy_qcfg_resp.pause = 0; 979 } 980 rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, 981 phy_qcfg_req->resp_addr, 982 phy_qcfg_req->cmpl_ring, 983 sizeof(phy_qcfg_resp)); 984 } 985 return rc; 986 } 987 988 static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) 989 { 990 int rc = 0; 991 struct input *encap_req = vf->hwrm_cmd_req_addr; 992 u32 req_type = le16_to_cpu(encap_req->req_type); 993 994 switch (req_type) { 995 case HWRM_FUNC_VF_CFG: 996 rc = bnxt_vf_store_mac(bp, vf); 997 break; 998 case HWRM_CFA_L2_FILTER_ALLOC: 999 rc = bnxt_vf_validate_set_mac(bp, vf); 1000 break; 1001 case HWRM_FUNC_CFG: 1002 /* TODO Validate if VF is allowed to change mac address, 1003 * mtu, num of rings etc 1004 */ 1005 rc = bnxt_hwrm_exec_fwd_resp( 1006 bp, vf, sizeof(struct hwrm_func_cfg_input)); 1007 break; 1008 case HWRM_PORT_PHY_QCFG: 1009 rc = bnxt_vf_set_link(bp, vf); 1010 break; 1011 default: 1012 break; 1013 } 1014 return rc; 1015 } 1016 1017 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) 1018 { 1019 u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; 1020 1021 /* Scan through VF's and process commands */ 1022 while (1) { 1023 vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); 1024 if (vf_id >= active_vfs) 1025 break; 1026 1027 clear_bit(vf_id, bp->pf.vf_event_bmap); 1028 bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); 1029 i = vf_id + 1; 1030 } 1031 } 1032 1033 void bnxt_update_vf_mac(struct bnxt *bp) 1034 { 1035 struct hwrm_func_qcaps_input req = {0}; 1036 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 1037 1038 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 1039 req.fid = cpu_to_le16(0xffff); 1040 1041 mutex_lock(&bp->hwrm_cmd_lock); 1042 if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 1043 goto update_vf_mac_exit; 1044 1045 /* Store MAC address from the firmware. There are 2 cases: 1046 * 1. MAC address is valid. It is assigned from the PF and we 1047 * need to override the current VF MAC address with it. 1048 * 2. MAC address is zero. The VF will use a random MAC address by 1049 * default but the stored zero MAC will allow the VF user to change 1050 * the random MAC address using ndo_set_mac_address() if he wants. 1051 */ 1052 if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) 1053 memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); 1054 1055 /* overwrite netdev dev_addr with admin VF MAC */ 1056 if (is_valid_ether_addr(bp->vf.mac_addr)) 1057 memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); 1058 update_vf_mac_exit: 1059 mutex_unlock(&bp->hwrm_cmd_lock); 1060 } 1061 1062 int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1063 { 1064 struct hwrm_func_vf_cfg_input req = {0}; 1065 int rc = 0; 1066 1067 if (!BNXT_VF(bp)) 1068 return 0; 1069 1070 if (bp->hwrm_spec_code < 0x10202) { 1071 if (is_valid_ether_addr(bp->vf.mac_addr)) 1072 rc = -EADDRNOTAVAIL; 1073 goto mac_done; 1074 } 1075 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 1076 req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 1077 memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 1078 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1079 mac_done: 1080 if (rc) { 1081 rc = -EADDRNOTAVAIL; 1082 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", 1083 mac); 1084 } 1085 return rc; 1086 } 1087 #else 1088 1089 void bnxt_sriov_disable(struct bnxt *bp) 1090 { 1091 } 1092 1093 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) 1094 { 1095 netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n"); 1096 } 1097 1098 void bnxt_update_vf_mac(struct bnxt *bp) 1099 { 1100 } 1101 1102 int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1103 { 1104 return 0; 1105 } 1106 #endif 1107