/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
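	/* The mailbox is stored as the first member of the VF info
	 * structure, so the mailbox pointer can be cast directly back to
	 * the owning struct fm10k_vf_info entry.
	 */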
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

/**
 *  fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
 *  @hw: Pointer to hardware structure
 *  @results: Pointer array to message, results[0] is pointer to message
 *  @mbx: Pointer to mailbox information structure
 *
 *  This function is a custom handler for MAC/VLAN requests from the VF. The
 *  assumption is that it is acceptable to directly hand off the message from
 *  the VF to the PF's switch manager. However, we use a MAC/VLAN message
 *  queue to avoid overloading the mailbox when a large number of requests
 *  come in.
 **/
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */

		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			vid = err;
		}

		/* update the VLAN table for this VF's VSI */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}

static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

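/**
 *  fm10k_iov_event - Handle VF Function Level Reset (FLR) events
 *  @interface: board private structure
 *
 *  Check the VFLRE registers and, for every VF the hardware reports as
 *  having been reset, reclaim its resources and reconnect its mailbox.
 **/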
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

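	/* Scan VFs from the highest index down: the initial shift places the
	 * bit for VF (i - 1) in the sign bit, and doubling the value each
	 * pass moves the next lower VF's bit there, so a negative value
	 * means the current VF has a pending reset.
	 */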
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		if (vflre >= 0)
			continue;

		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}

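/**
 *  fm10k_iov_mbx - Process VF mailboxes
 *  @interface: board private structure
 *
 *  Walk the VF mailboxes, draining the switch manager mailbox between VFs
 *  and resetting any VF whose port mapping is stale or whose mailbox has
 *  timed out. Processing resumes where the previous pass stopped so that no
 *  VF is starved.
 **/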
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * sending a request over the switch manager (SM) mailbox. This means
	 * that processing too many VF messages at once could cause a mailbox
	 * timeout on the PF. To prevent this, store the index of the next VF
	 * mailbox to process and use it as the start of the loop so that we
	 * don't starve whichever VF got ignored on the previous run.
	 */
process_mbx:
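	/* Start from next_vf_mbx if the previous pass stopped early (the GNU
	 * "?:" extension falls back to num_vfs when it is zero) and count
	 * down toward VF 0.
	 */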
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify the port mapping is valid; if not, reset the port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* release the mailbox lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

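/**
 *  fm10k_iov_suspend - Stop VFs and release their hardware resources
 *  @pdev: pointer to the PCI device structure
 *
 *  Disable the VF RSS DGLORT mapping and reset the resources and logical
 *  port of every active VF.
 **/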
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
		fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
	}
}

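/**
 *  fm10k_iov_resume - Restore hardware resources for active VFs
 *  @pdev: pointer to the PCI device structure
 *
 *  Reassign queue resources, reprogram the VF RSS DGLORT mapping, and
 *  restore each VF's logical port, default MAC/VLAN, and mailbox connection.
 *  Returns 0 on success, or -ENOMEM if the SR-IOV data has not been
 *  allocated.
 **/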
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
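	/* rss_l and vsi_l are bit widths: enough bits to index the queues of
	 * one VF's pool and enough VSI bits to cover every VF the device
	 * supports.
	 */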
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

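/**
 *  fm10k_iov_update_pvid - Record an updated port VLAN ID for a VF
 *  @interface: board private structure
 *  @glort: global resource tag of the port being updated
 *  @pvid: new default VLAN ID
 *
 *  If the GLORT maps to one of our VFs and the VLAN ID has changed, store it
 *  as the VF's switch VLAN and push an updated default MAC/VLAN to that VF.
 **/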
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
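	/* size covers the base structure plus num_vfs entries of the
	 * trailing vf_info[] array
	 */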
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

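/* Clearing the completer abort bit in the AER uncorrectable error severity
 * register causes such errors to be reported as non-fatal rather than fatal.
 */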
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

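/**
 *  fm10k_iov_configure - Callback to enable or change the number of VFs
 *  @pdev: pointer to the PCI device structure
 *  @num_vfs: number of VFs requested, 0 to disable SR-IOV
 *
 *  Allocate per-VF driver state and enable SR-IOV for the requested number
 *  of VFs, or tear everything down when num_vfs is zero. Returns the number
 *  of VFs in use, or a negative error code.
 **/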
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && num_vfs != current_vfs) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}

int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QoS is unsupported, and VLAN IDs must be in the range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* only the default 802.1Q VLAN protocol is supported */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused min_rate, int max_rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mb/s or greater than link speed */
	if (max_rate &&
	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = max_rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);

	return 0;
}

int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}