// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_sriov.h"
#include "nfp_net.h"
#include "nfp_main.h"
#include "nfp_port.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @pf:       NFP PF handle
 * @netdev:   net_device to set MAC address on
 * @port:     NFP port structure
 *
 * First try to get the MAC address from the NSP ETH table. If that
 * fails, generate a random address.
 */
void
nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
		     struct nfp_port *port)
{
	struct nfp_eth_table_port *eth_port;

	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port) {
		eth_hw_addr_random(netdev);
		return;
	}

	eth_hw_addr_set(netdev, eth_port->mac_addr);
	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}

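/* Look up an NSP ETH table entry by port index; returns NULL if the table is
 * missing or no entry with that index exists.
 */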
static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].index == index)
			return &eth_tbl->ports[i];

	return NULL;
}

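/* The number of data vNICs/ports comes from the optional
 * "nfd_cfg_pf%u_num_ports" run-time symbol; assume a single port when the
 * firmware does not provide it.
 */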
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}

static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_free_vnic(pf, nn);
}

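/* Allocate one vNIC.  Ring counts and first queue indices are read from the
 * vNIC's control BAR slice and used to locate its queue controller pointers
 * within @qc_bar.  vNICs which need a netdev are also offered to the app for
 * per-vNIC allocation, and every vNIC is linked into pf->vnics.
 */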
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
			   n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}

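/* Second stage of vNIC bring-up: register the devlink port (if any), run the
 * common nfp_net init, add debugfs entries and let the app perform its
 * per-vNIC init.  Errors unwind everything done here.
 */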
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			return err;
	}

	err = nfp_net_init(nn);
	if (err)
		goto err_devlink_port_clean;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	if (nn->port)
		nfp_devlink_port_type_eth_set(nn->port);

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_type_clean;
	}

	return 0;

err_devlink_port_type_clean:
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	return err;
}

static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		if (nn->port)
			nn->port->link_cb = nfp_net_refresh_port_table;

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID)
			nfp_net_pf_free_vnic(pf, nn);
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}

static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
}

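/* Allocate MSI-X vectors for all vNICs.  Request enough for every vNIC's
 * non-queue vectors plus one vector per ring vector, but accept as little as
 * NFP_NET_MIN_VNIC_IRQS per vNIC.  Whatever was granted is then distributed
 * across the vNICs, each getting at most its fair share of what is left.
 */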
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}

static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}

static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}

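/* Allocate and initialize the app.  If the app requires a control vNIC, map
 * its control BAR via the "_pf%u_net_ctrl_bar" symbol and allocate the vNIC
 * here; it is initialized and opened later by nfp_net_pf_app_start_ctrl().
 */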
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	struct devlink *devlink = priv_to_devlink(pf);
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	devl_lock(devlink);
	err = nfp_app_init(pf->app);
	devl_unlock(devlink);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}

static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);

	nfp_app_free(pf->app);
	pf->app = NULL;
}

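/* Init and open the control vNIC, if the app uses one.  The control vNIC is
 * allocated without a netdev and reuses the common vNIC init path.
 */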
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}

static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}

static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}

static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}

static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}

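/* Map the memory areas used by the PF.  The data vNIC control BARs and the
 * queue controller area are mandatory; the MAC stats, VF config and VF config
 * table 2 symbols are optional and left unmapped if the firmware does not
 * provide them (lookup returns -ENOENT).
 */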
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	if (pf->eth_tbl) {
		min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
			       nfp_qcp_queue_offset(pf->dev_info, 0),
			       pf->dev_info->qc_area_sz, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}

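/* Translation between the NFP_NET_CFG_STS_LINK_RATE_* encoding used by the
 * firmware and ethtool SPEED_* values.
 */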
static const unsigned int lr_to_speed[] = {
	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
	[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
	[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
	[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
	[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
	[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
	[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
};

unsigned int nfp_net_lr2speed(unsigned int linkrate)
{
	if (linkrate < ARRAY_SIZE(lr_to_speed))
		return lr_to_speed[linkrate];

	return SPEED_UNKNOWN;
}

unsigned int nfp_net_speed2lr(unsigned int speed)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
		if (speed == lr_to_speed[i])
			return i;
	}

	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
}

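/* Write the NSP-reported port speed into the vNIC's
 * NFP_NET_CFG_STS_NSP_LINK_RATE word; if the link is reported down, write the
 * "unknown" rate instead.
 */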
static void nfp_net_notify_port_speed(struct nfp_port *port)
{
	struct net_device *netdev = port->netdev;
	struct nfp_net *nn;
	u16 sts;

	if (!nfp_netdev_is_nfp_net(netdev))
		return;

	nn = netdev_priv(netdev);
	sts = nn_readw(nn, NFP_NET_CFG_STS);

	if (!(sts & NFP_NET_CFG_STS_LINK)) {
		nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
		return;
	}

	nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
}

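/* Refresh a port's cached ETH table entry from a freshly read table.  Ports
 * which disappeared are flagged as changed; ports whose configuration was
 * overridden are marked invalid so they get torn down later.
 */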
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));
	nfp_net_notify_port_speed(port);

	return 0;
}

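/* Re-read the NSP ETH table and update all ports.  Called with the devlink
 * lock held; takes RTNL around the port updates.  Representor state is
 * resynced and vNICs whose ports became invalid are torn down.
 */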
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	devl_assert_locked(devlink);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}

static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	nfp_net_refresh_port_table_sync(pf);
	devl_unlock(devlink);
}

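/* Link event callback: mark the port as changed and schedule the refresh
 * worker, which re-reads the port table from process context.
 */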
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}

/*
 * PCI device functions
 */
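/* Bring up all per-PF networking state: map firmware memory, validate the
 * firmware ABI version, initialize the app, allocate vNICs and IRQs, start
 * the app and finally register with devlink.
 */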
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	err = nfp_devlink_params_register(pf);
	if (err)
		goto err_shared_buf_unreg;

	devl_lock(devlink);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	devl_unlock(devlink);
	devlink_register(devlink);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	devl_unlock(devlink);
	nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	cancel_work_sync(&pf->port_refresh_work);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}

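/* Tear down everything set up by nfp_net_pci_probe(), in roughly the reverse
 * order.
 */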
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net *nn, *next;

	devlink_unregister(priv_to_devlink(pf));
	devl_lock(devlink);
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	devl_unlock(devlink);

	nfp_devlink_params_unregister(pf);
	nfp_shared_buf_unregister(pf);

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	cancel_work_sync(&pf->port_refresh_work);
}