/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
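/* Forward an asynchronous event completion to a VF (or to all VFs when
 * vf is NULL) using the HWRM_FWD_ASYNC_EVENT_CMPL firmware command.
 */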
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

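/* Common sanity check for the VF ndo handlers below: SR-IOV must be
 * enabled and vf_id must fall within the range of active VFs.
 */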
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "VF ndo called even though SRIOV is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filtering on the guest VLAN,
	 * the spoof check should also include VLAN anti-spoofing.
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

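/* Query the function flags from firmware with HWRM_FUNC_QCFG and cache
 * them in vf->func_qcfg_flags (used below to check the trusted VF flag).
 */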
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input req = {0};

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* Reject broadcast and multicast MAC addresses.  A zero MAC address
	 * means the VF is allowed to use its own MAC address.
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority still needs to be
	 * implemented; for now, fail the command if a priority is specified.
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

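/* Ask firmware to free the resources of every VF in the range
 * [first_vf_id, first_vf_id + num_vfs) with HWRM_FUNC_VF_RESC_FREE.
 */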
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

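/* Allocate the per-VF state array, the VF event bitmap, and the DMA
 * pages used to hold HWRM requests forwarded by the VFs.  Each page
 * holds BNXT_HWRM_REQS_PER_PAGE request buffers of
 * BNXT_HWRM_REQ_MAX_SIZE bytes, and each VF gets one buffer.
 */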
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

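/* Register the VF request buffer pages with firmware so that HWRM
 * commands issued by the VFs can be forwarded to the PF driver.
 */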
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_vf_info *vf;

	vf = &bp->pf.vf[vf_id];
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req.dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
		req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
		req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
	}
	if (vf->flags & BNXT_VF_TRUST)
		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req.max_msix = cpu_to_le16(vf_msix / num_vfs);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
					     n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_irqs -= vf_msix * n;

		rc = pf->active_vfs;
	}
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among the VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

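/* Reserve VF resources using the new resource manager interface when
 * available, otherwise fall back to the legacy HWRM_FUNC_CFG method.
 */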
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	bnxt_ulp_sriov_cfg(bp, *num_vfs);
	return 0;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum config,
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for the VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for the VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "SRIOV is not allowed when the IRQ mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since the interface is down\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the requested number of VFs is the same as already enabled */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

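/* Forward a response prepared by the PF back to the VF that issued the
 * original request, using the HWRM_FWD_RESP firmware command.
 */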
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

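/* Handle a forwarded HWRM_FUNC_VF_CFG request that may set a default
 * MAC address for the VF.
 */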
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow the VF to set a valid MAC address if trust is on or if the
	 * PF-assigned MAC address is zero.
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow a trusted VF to set any valid MAC address.  Otherwise the
	 * address must match the PF-assigned VF MAC address, or the VF's own
	 * MAC address if firmware spec >= 1.2.2.
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, the VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match.
		 * 2. Allow the VF to modify its own MAC when the PF has not
		 *    assigned a valid MAC address and firmware spec >= 0x10202.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

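/* Validate a forwarded VF request and either execute it on behalf of
 * the VF or reject it, based on the HWRM request type.
 */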
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: Validate whether the VF is allowed to change the MAC
		 * address, MTU, number of rings, etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

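/* Process the forwarded HWRM requests of all VFs that are marked
 * pending in bp->pf.vf_event_bmap.
 */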
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

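/* Called on the VF side to ask the PF (via HWRM_FUNC_VF_CFG) to approve
 * the MAC address the VF wants to use.  Failures are only reported when
 * strict checking is requested.
 */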
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}

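/* Called on the VF side to pick up the PF-assigned MAC address from
 * firmware (HWRM_FUNC_QCAPS) and update the netdev address accordingly.
 */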
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	bool inform_pf = false;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware.  There are 2 cases:
	 * 1. MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero.  The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
		/* This means we are now using our own MAC address, let
		 * the PF know about this MAC address.
		 */
		if (!is_valid_ether_addr(bp->vf.mac_addr))
			inform_pf = true;
	}

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (inform_pf)
		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif