1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3 
4 /*
5  * nfp_net_main.c
6  * Netronome network device driver: Main entry point
7  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8  *          Alejandro Lucero <alejandro.lucero@netronome.com>
9  *          Jason McMullan <jason.mcmullan@netronome.com>
10  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
11  */
12 
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/lockdep.h>
17 #include <linux/pci.h>
18 #include <linux/pci_regs.h>
19 #include <linux/msi.h>
20 #include <linux/random.h>
21 #include <linux/rtnetlink.h>
22 
23 #include "nfpcore/nfp.h"
24 #include "nfpcore/nfp_cpp.h"
25 #include "nfpcore/nfp_dev.h"
26 #include "nfpcore/nfp_nffw.h"
27 #include "nfpcore/nfp_nsp.h"
28 #include "nfpcore/nfp6000_pcie.h"
29 #include "nfp_app.h"
30 #include "nfp_net_ctrl.h"
31 #include "nfp_net_sriov.h"
32 #include "nfp_net.h"
33 #include "nfp_main.h"
34 #include "nfp_port.h"
35 
36 #define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
37 
38 /**
39  * nfp_net_get_mac_addr() - Get the MAC address.
40  * @pf:       NFP PF handle
41  * @netdev:   net_device to set MAC address on
42  * @port:     NFP port structure
43  *
44  * First try to get the MAC address from NSP ETH table. If that
45  * fails generate a random address.
46  */
47 void
48 nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
49 		     struct nfp_port *port)
50 {
51 	struct nfp_eth_table_port *eth_port;
52 
53 	eth_port = __nfp_port_get_eth_port(port);
54 	if (!eth_port) {
55 		eth_hw_addr_random(netdev);
56 		return;
57 	}
58 
59 	eth_hw_addr_set(netdev, eth_port->mac_addr);
60 	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
61 }
62 
63 static struct nfp_eth_table_port *
64 nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
65 {
66 	int i;
67 
68 	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
69 		if (eth_tbl->ports[i].index == index)
70 			return &eth_tbl->ports[i];
71 
72 	return NULL;
73 }
74 
/* Number of data vNICs/ports the firmware exposes for this PF;
 * defaults to 1 when the "nfd_cfg_pf%u_num_ports" rtsym is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
79 
/* App ID selected by the firmware; defaults to the core NIC app when
 * the "_pf%u_net_app_id" rtsym is absent.
 */
static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					  NFP_APP_CORE_NIC);
}
85 
/* Undo nfp_net_pf_alloc_vnic(): release app per-vNIC state (data vNICs
 * only), the port, the PF list/bookkeeping and finally the vNIC itself.
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
95 
96 static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
97 {
98 	struct nfp_net *nn, *next;
99 
100 	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
101 		if (nfp_net_is_data_vnic(nn))
102 			nfp_net_pf_free_vnic(pf, nn);
103 }
104 
105 static struct nfp_net *
106 nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
107 		      void __iomem *ctrl_bar, void __iomem *qc_bar,
108 		      int stride, unsigned int id)
109 {
110 	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
111 	struct nfp_net *nn;
112 	int err;
113 
114 	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
115 	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
116 	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
117 	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
118 
119 	/* Allocate and initialise the vNIC */
120 	nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
121 			   n_tx_rings, n_rx_rings);
122 	if (IS_ERR(nn))
123 		return nn;
124 
125 	nn->app = pf->app;
126 	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
127 	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
128 	nn->dp.is_vf = 0;
129 	nn->stride_rx = stride;
130 	nn->stride_tx = stride;
131 
132 	if (needs_netdev) {
133 		err = nfp_app_vnic_alloc(pf->app, nn, id);
134 		if (err) {
135 			nfp_net_free(nn);
136 			return ERR_PTR(err);
137 		}
138 	}
139 
140 	pf->num_vnics++;
141 	list_add_tail(&nn->vnic_list, &pf->vnics);
142 
143 	return nn;
144 }
145 
/* Finish initialisation of one vNIC and register it: devlink port,
 * netdev registration via nfp_net_init(), debugfs and app hooks.
 * On error, unwind in the exact reverse order of the steps done.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	/* ctrl vNICs have no port; only register ports that exist */
	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			return err;
	}

	err = nfp_net_init(nn);
	if (err)
		goto err_devlink_port_clean;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	if (nn->port)
		nfp_devlink_port_type_eth_set(nn->port);

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_type_clean;
	}

	return 0;

err_devlink_port_type_clean:
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	return err;
}
188 
/* Allocate one vNIC per data port.  Each vNIC's control registers live
 * in its own NFP_PF_CSR_SLICE_SIZE slice of the control BAR.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		if (nn->port)
			nn->port->link_cb = nfp_net_refresh_port_table;

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
		pf->sp_indiff |= nn->tlv_caps.sp_indiff;

		/* Kill the vNIC if app init marked it as invalid.
		 * Safe here: the loop iterates by index, not by the
		 * list node being freed.
		 */
		if (nn->port && nn->port->type == NFP_PORT_INVALID)
			nfp_net_pf_free_vnic(pf, nn);
	}

	/* All ports may have been invalid - nothing left to operate on */
	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
225 
/* Undo nfp_net_pf_init_vnic() in reverse order: app hook, devlink port
 * type, debugfs, netdev teardown, then devlink port unregistration.
 */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
}
237 
/* Allocate MSI-X vectors for all vNICs and distribute them.  Asks for
 * the sum of all vNICs' wanted vectors but accepts as few as
 * NFP_NET_MIN_VNIC_IRQS per vNIC.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs - each vNIC gets at most what it wanted,
	 * and at least an even share of what remains.
	 */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		/* num_irqs - irqs_left is the index of the first unused entry */
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}
277 
/* Disable MSI-X on the device, then free the IRQ entry table. */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
283 
/* Finish init of all data vNICs, assigning sequential ids.  On failure
 * walk back over the already-initialised vNICs and clean them up.
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Unwind only the entries before the one that failed */
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}
310 
311 static int nfp_net_pf_cfg_nsp(struct nfp_pf *pf, bool sp_indiff)
312 {
313 	struct nfp_nsp *nsp;
314 	char hwinfo[32];
315 	int err;
316 
317 	nsp = nfp_nsp_open(pf->cpp);
318 	if (IS_ERR(nsp)) {
319 		err = PTR_ERR(nsp);
320 		return err;
321 	}
322 
323 	snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
324 	err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
325 	if (err)
326 		nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err);
327 
328 	nfp_nsp_close(nsp);
329 	return err;
330 }
331 
/* Push the PF's accumulated sp_indiff state to the NSP at init time. */
static int nfp_net_pf_init_nsp(struct nfp_pf *pf)
{
	return nfp_net_pf_cfg_nsp(pf, pf->sp_indiff);
}
336 
/* Reset sp_indiff to false on teardown; failure is deliberately ignored. */
static void nfp_net_pf_clean_nsp(struct nfp_pf *pf)
{
	(void)nfp_net_pf_cfg_nsp(pf, false);
}
341 
/* Allocate and initialise the app for this PF and, if the app needs
 * one, map its control BAR and allocate the ctrl vNIC.  On failure all
 * partially-created state is unwound and pf->app reset to NULL.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	struct devlink *devlink = priv_to_devlink(pf);
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	/* The flower NIC app implies switchdev-independent mode */
	pf->sp_indiff |= pf->app->type->id == NFP_APP_FLOWER_NIC;

	/* nfp_app_init() is called under the devlink instance lock */
	devl_lock(devlink);
	err = nfp_app_init(pf->app);
	devl_unlock(devlink);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	pf->ctrl_vnic =	nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
392 
/* Reverse of nfp_net_pf_app_init(): free the ctrl vNIC and its BAR if
 * present, clean the app under the devlink lock, then free it.
 */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);

	nfp_app_free(pf->app);
	pf->app = NULL;
}
409 
/* Initialise and open the ctrl vNIC, if the app has one.  A failed
 * open rolls back the vNIC init.
 */
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}
430 
431 static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
432 {
433 	if (!pf->ctrl_vnic)
434 		return;
435 	nfp_ctrl_close(pf->ctrl_vnic);
436 	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
437 }
438 
/* Start the app: bring up the ctrl vNIC, start the app proper, then
 * re-enable SR-IOV if VFs were already configured.  Errors unwind the
 * completed steps in reverse order.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
465 
/* Stop the app in reverse order of nfp_net_pf_app_start(). */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
473 
/* Release all CPP areas mapped by nfp_net_pci_map_mem().  The first
 * three are optional symbols and only released when actually mapped.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
485 
/* Map all device memory areas the PF needs: the data vNIC control BAR,
 * the optional MAC stats / VF config / VF config table 2 symbols, and
 * the Queue Controller area.  Optional symbols tolerate -ENOENT (the
 * corresponding pointer is left NULL); any other error unwinds all
 * mappings made so far.
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	/* One control-BAR slice per data vNIC */
	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	/* MAC stats only make sense when an ETH table is present */
	if (pf->eth_tbl) {
		min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			/* Symbol absent - stats simply unavailable */
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	/* Per-VF config entries plus the trailing mailbox */
	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	/* The Queue Controller area is mapped by raw CPP address, not rtsym */
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
			       nfp_qcp_queue_offset(pf->dev_info, 0),
			       pf->dev_info->qc_area_sz, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}
562 
/* Map NFP_NET_CFG_STS link rate codes to ethtool SPEED_* values */
static const unsigned int lr_to_speed[] = {
	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
	[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
	[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
	[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
	[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
	[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
	[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
};
573 
574 unsigned int nfp_net_lr2speed(unsigned int linkrate)
575 {
576 	if (linkrate < ARRAY_SIZE(lr_to_speed))
577 		return lr_to_speed[linkrate];
578 
579 	return SPEED_UNKNOWN;
580 }
581 
582 unsigned int nfp_net_speed2lr(unsigned int speed)
583 {
584 	int i;
585 
586 	for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
587 		if (speed == lr_to_speed[i])
588 			return i;
589 	}
590 
591 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
592 }
593 
594 static void nfp_net_notify_port_speed(struct nfp_port *port)
595 {
596 	struct net_device *netdev = port->netdev;
597 	struct nfp_net *nn;
598 	u16 sts;
599 
600 	if (!nfp_netdev_is_nfp_net(netdev))
601 		return;
602 
603 	nn = netdev_priv(netdev);
604 	sts = nn_readw(nn, NFP_NET_CFG_STS);
605 
606 	if (!(sts & NFP_NET_CFG_STS_LINK)) {
607 		nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
608 		return;
609 	}
610 
611 	nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
612 }
613 
/* Refresh port->eth_port from a freshly read ETH table and push the
 * new speed to the vNIC.  A port that vanished gets NFP_PORT_CHANGED
 * set again so a later refresh retries; a port whose config changed is
 * marked invalid so it gets torn down.  Caller must hold rtnl.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));
	nfp_net_notify_port_speed(port);

	return 0;
}
639 
/* Synchronously re-read the NSP ETH table and update all ports, then
 * resync representors and destroy any vNICs whose ports became
 * invalid.  Caller must hold the devlink instance lock; rtnl is taken
 * internally around the port updates.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	devl_assert_locked(devlink);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed - flag eth ports so a later refresh retries */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}
692 
/* Deferred-work entry point: take the devlink lock and run a port
 * table refresh (queued by nfp_net_refresh_port_table()).
 */
static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	nfp_net_refresh_port_table_sync(pf);
	devl_unlock(devlink);
}
703 
/* Mark @port as changed and schedule an asynchronous port table
 * refresh on the PF's workqueue.  Safe to call from link callbacks.
 */
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}
712 
713 int nfp_net_refresh_eth_port(struct nfp_port *port)
714 {
715 	struct nfp_cpp *cpp = port->app->cpp;
716 	struct nfp_eth_table *eth_table;
717 	int ret;
718 
719 	clear_bit(NFP_PORT_CHANGED, &port->flags);
720 
721 	eth_table = nfp_eth_read_ports(cpp);
722 	if (!eth_table) {
723 		set_bit(NFP_PORT_CHANGED, &port->flags);
724 		nfp_err(cpp, "Error refreshing port state table!\n");
725 		return -EIO;
726 	}
727 
728 	ret = nfp_net_eth_port_update(cpp, port, eth_table);
729 
730 	kfree(eth_table);
731 
732 	return ret;
733 }
734 
735 /*
736  * PCI device functions
737  */
/* Main PF bring-up: validate FW, map device memory, init the app,
 * allocate and init vNICs, IRQs and NSP state, then publish the
 * devlink instance.  Errors unwind all completed stages in reverse.
 * Returns 0 on success or a negative errno.
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	/* Without a symbol table there is no FW to talk to */
	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	/* nfp_pf_rtsym_read_optional() errors come back negative */
	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	/* Only the generic FW ABI class with no reserved bits is supported */
	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	err = nfp_devlink_params_register(pf);
	if (err)
		goto err_shared_buf_unreg;

	devl_lock(devlink);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_init_nsp(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_clean_nsp;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	/* Publish the devlink instance only after everything is up */
	devl_unlock(devlink);
	devlink_register(devlink);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_clean_nsp:
	nfp_net_pf_clean_nsp(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	devl_unlock(devlink);
	nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	cancel_work_sync(&pf->port_refresh_work);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}
859 
860 void nfp_net_pci_remove(struct nfp_pf *pf)
861 {
862 	struct devlink *devlink = priv_to_devlink(pf);
863 	struct nfp_net *nn, *next;
864 
865 	devlink_unregister(priv_to_devlink(pf));
866 	devl_lock(devlink);
867 	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
868 		if (!nfp_net_is_data_vnic(nn))
869 			continue;
870 		nfp_net_pf_clean_vnic(pf, nn);
871 		nfp_net_pf_free_vnic(pf, nn);
872 	}
873 
874 	nfp_net_pf_clean_nsp(pf);
875 	nfp_net_pf_app_stop(pf);
876 	/* stop app first, to avoid double free of ctrl vNIC's ddir */
877 	nfp_net_debugfs_dir_clean(&pf->ddir);
878 
879 	devl_unlock(devlink);
880 
881 	nfp_devlink_params_unregister(pf);
882 	nfp_shared_buf_unregister(pf);
883 
884 	nfp_net_pf_free_irqs(pf);
885 	nfp_net_pf_app_clean(pf);
886 	nfp_net_pci_unmap_mem(pf);
887 
888 	cancel_work_sync(&pf->port_refresh_work);
889 }
890