// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_sriov.h"
#include "nfp_net.h"
#include "nfp_main.h"
#include "nfp_port.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @pf:       NFP PF handle
 * @netdev:   net_device to set MAC address on
 * @port:     NFP port structure
 *
 * First try to get the MAC address from the NSP ETH table. If that
 * fails, generate a random address.
 */
void
nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
		     struct nfp_port *port)
{
	struct nfp_eth_table_port *eth_port;

	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port) {
		eth_hw_addr_random(netdev);
		return;
	}

	eth_hw_addr_set(netdev, eth_port->mac_addr);
	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}

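/* Look up an NSP ETH table entry by port index, NULL if absent. */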
static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].index == index)
			return &eth_tbl->ports[i];

	return NULL;
}

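/* Number of data vNICs/ports the loaded firmware exposes for this PF,
 * read from the firmware symbol table (defaults to 1 if the symbol is
 * not present).
 */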
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}

static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_free_vnic(pf, nn);
}

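/* Allocate a single vNIC backed by one control BAR slice.  Queue bases
 * and ring counts are read from the slice, the QCP queue pointers are
 * derived from @qc_bar, and netdev-backed (data) vNICs also get their
 * per-app state allocated here.
 */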
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
			   n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}

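/* Finish bringing up one vNIC: register its devlink port (if it has
 * one), run the core nfp_net init, add debugfs entries and let the app
 * initialize its per-vNIC state.  Unwinds in reverse order on error.
 */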
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			return err;
	}

	err = nfp_net_init(nn);
	if (err)
		goto err_devlink_port_clean;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_debugfs_vnic_clean;
	}

	return 0;

err_debugfs_vnic_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	return err;
}

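/* Allocate one data vNIC per port, each using a 32K slice of the
 * control BAR.  vNICs whose port the app marked invalid during alloc
 * are dropped again; fail with -ENODEV if nothing usable remains.
 */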
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		if (nn->port)
			nn->port->link_cb = nfp_net_refresh_port_table;

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID)
			nfp_net_pf_free_vnic(pf, nn);
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}

static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
}

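/* Allocate MSI-X vectors for all vNICs.  Request enough for every ring
 * vector plus the non-queue vectors, accept as few as
 * NFP_NET_MIN_VNIC_IRQS per vNIC, and spread whatever was granted
 * evenly across the vNICs.
 */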
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}

static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}

static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}

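/* Allocate and initialize the app layer for this PF.  Apps which need
 * a control vNIC also get their control BAR mapped and a netdev-less
 * control vNIC allocated here.
 */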
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	struct devlink *devlink = priv_to_devlink(pf);
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	devl_lock(devlink);
	err = nfp_app_init(pf->app);
	devl_unlock(devlink);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}

static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);

	nfp_app_free(pf->app);
	pf->app = NULL;
}

static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}

static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}

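/* Start the app: bring up the control vNIC first (if the app has one),
 * then start the app itself and enable SR-IOV for any VFs which
 * already exist.
 */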
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}

static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}

static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}

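/* Map all device memory the PF driver needs: the data vNIC control
 * BAR, the optional MAC stats and VF config symbols (missing symbols
 * are tolerated and the pointers left NULL), and the queue controller
 * area.
 */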
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	if (pf->eth_tbl) {
		min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
			       nfp_qcp_queue_offset(pf->dev_info, 0),
			       pf->dev_info->qc_area_sz, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}

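/* Translation between the link rate encoding used in the vNIC config
 * BAR (NFP_NET_CFG_STS_LINK_RATE_*) and ethtool SPEED_* values.
 */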
static const unsigned int lr_to_speed[] = {
	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
	[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
	[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
	[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
	[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
	[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
	[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
};

unsigned int nfp_net_lr2speed(unsigned int linkrate)
{
	if (linkrate < ARRAY_SIZE(lr_to_speed))
		return lr_to_speed[linkrate];

	return SPEED_UNKNOWN;
}

unsigned int nfp_net_speed2lr(unsigned int speed)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
		if (speed == lr_to_speed[i])
			return i;
	}

	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
}

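/* Write the NSP-reported port speed into the vNIC config BAR
 * (NFP_NET_CFG_STS_NSP_LINK_RATE); report the rate as unknown while
 * the link is down.
 */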
static void nfp_net_notify_port_speed(struct nfp_port *port)
{
	struct net_device *netdev = port->netdev;
	struct nfp_net *nn;
	u16 sts;

	if (!nfp_netdev_is_nfp_net(netdev))
		return;

	nn = netdev_priv(netdev);
	sts = nn_readw(nn, NFP_NET_CFG_STS);

	if (!(sts & NFP_NET_CFG_STS_LINK)) {
		nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
		return;
	}

	nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
}

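/* Refresh a port's cached NSP ETH table entry from a freshly read
 * table.  Ports which disappeared are flagged as changed, ports whose
 * configuration was overridden are marked invalid so they get torn
 * down.  Must be called under rtnl_lock.
 */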
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));
	nfp_net_notify_port_speed(port);

	return 0;
}

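/* Re-read the NSP ETH table and update the state of all ports, then
 * resync representors and remove any vNICs whose ports have become
 * invalid.  Runs with the devlink instance lock held.
 */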
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	devl_assert_locked(devlink);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}

static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	nfp_net_refresh_port_table_sync(pf);
	devl_unlock(devlink);
}

void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}

/*
 * PCI device functions
 */
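/* Main netdev-side probe path for the PF: map device memory, validate
 * the firmware ABI version, bring up the app, allocate and initialize
 * the vNICs and their interrupts, then register the devlink instance.
 * Errors unwind in reverse order.
 */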
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	err = nfp_devlink_params_register(pf);
	if (err)
		goto err_shared_buf_unreg;

	devl_lock(devlink);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	devl_unlock(devlink);
	devlink_register(devlink);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	devl_unlock(devlink);
	nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	cancel_work_sync(&pf->port_refresh_work);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}

void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net *nn, *next;

	devlink_unregister(priv_to_devlink(pf));
	devl_lock(devlink);
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	devl_unlock(devlink);

	nfp_devlink_params_unregister(pf);
	nfp_shared_buf_unregister(pf);

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	cancel_work_sync(&pf->port_refresh_work);
}
836