xref: /openbmc/linux/drivers/net/ethernet/netronome/nfp/nfp_net_main.c (revision 023e41632e065d49bcbe31b3c4b336217f96a271)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3 
4 /*
5  * nfp_net_main.c
6  * Netronome network device driver: Main entry point
7  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8  *          Alejandro Lucero <alejandro.lucero@netronome.com>
9  *          Jason McMullan <jason.mcmullan@netronome.com>
10  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
11  */
12 
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/lockdep.h>
17 #include <linux/pci.h>
18 #include <linux/pci_regs.h>
19 #include <linux/msi.h>
20 #include <linux/random.h>
21 #include <linux/rtnetlink.h>
22 
23 #include "nfpcore/nfp.h"
24 #include "nfpcore/nfp_cpp.h"
25 #include "nfpcore/nfp_nffw.h"
26 #include "nfpcore/nfp_nsp.h"
27 #include "nfpcore/nfp6000_pcie.h"
28 #include "nfp_app.h"
29 #include "nfp_net_ctrl.h"
30 #include "nfp_net_sriov.h"
31 #include "nfp_net.h"
32 #include "nfp_main.h"
33 #include "nfp_port.h"
34 
35 #define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
36 
37 /**
38  * nfp_net_get_mac_addr() - Get the MAC address.
39  * @pf:       NFP PF handle
40  * @netdev:   net_device to set MAC address on
41  * @port:     NFP port structure
42  *
43  * First try to get the MAC address from NSP ETH table. If that
44  * fails generate a random address.
45  */
46 void
47 nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
48 		     struct nfp_port *port)
49 {
50 	struct nfp_eth_table_port *eth_port;
51 
52 	eth_port = __nfp_port_get_eth_port(port);
53 	if (!eth_port) {
54 		eth_hw_addr_random(netdev);
55 		return;
56 	}
57 
58 	ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
59 	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
60 }
61 
62 static struct nfp_eth_table_port *
63 nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
64 {
65 	int i;
66 
67 	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
68 		if (eth_tbl->ports[i].index == index)
69 			return &eth_tbl->ports[i];
70 
71 	return NULL;
72 }
73 
/* Number of data vNICs the FW exposes for this PF; defaults to 1 when
 * the rtsym is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	int num_ports;

	num_ports = nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);

	return num_ports;
}
78 
79 static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
80 {
81 	return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
82 					  NFP_APP_CORE_NIC);
83 }
84 
/* Tear down a single vNIC: release app per-vNIC state (data vNICs
 * only), free the port, unlink from pf->vnics and free the nfp_net.
 * Counterpart of nfp_net_pf_alloc_vnic().
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
94 
95 static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
96 {
97 	struct nfp_net *nn, *next;
98 
99 	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
100 		if (nfp_net_is_data_vnic(nn))
101 			nfp_net_pf_free_vnic(pf, nn);
102 }
103 
/* Allocate one vNIC backed by the given control BAR slice.
 *
 * Reads the queue layout from the FW-provided control BAR, allocates
 * the nfp_net (with a netdev when @needs_netdev), points it at its
 * queue controller registers, lets the app attach per-vNIC state and
 * links it onto pf->vnics.  Returns ERR_PTR() on failure.
 */
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	/* Queue base indices and ring count limits come from the FW */
	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
			   n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
	/* TX/RX queue pointers are offsets into the queue controller BAR */
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}
145 
/* Finish bringing up a vNIC: core init, debugfs entries, devlink port
 * registration (data vNICs only — the control vNIC has no port) and
 * app-level init.  On error everything done here is unwound.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	/* nn->port is NULL for the control vNIC */
	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			goto err_dfs_clean;
	}

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_clean;
	}

	return 0;

err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
err_dfs_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	return err;
}
183 
/* Allocate one data vNIC per port, each backed by its own 32K slice of
 * the control BAR.  Fails with -ENODEV when no usable vNIC remains
 * (e.g. the app marked all ports invalid).
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Advance unconditionally so BAR slices stay in step with
		 * the vNIC index even when a vNIC is killed below.
		 */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
218 
/* Undo nfp_net_pf_init_vnic() — app teardown first, then devlink port,
 * debugfs and core vNIC state, in reverse order of initialization.
 */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
}
228 
/* Allocate MSI-X vectors for all vNICs and distribute them.
 *
 * Requests enough vectors for every vNIC's non-queue vectors plus one
 * per ring vector, but accepts as few as NFP_NET_MIN_VNIC_IRQS per
 * vNIC.  Whatever was granted is then spread across the vNICs, each
 * getting at most its wanted share and at least an even split of what
 * remains.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		/* Even share of the remainder, capped at what this vNIC
		 * actually wants.
		 */
		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		/* num_irqs - irqs_left indexes the first unassigned entry */
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}
268 
/* Disable MSI-X on the device and free the IRQ entry table allocated
 * by nfp_net_pf_alloc_irqs().
 */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
274 
/* Initialize all data vNICs on the PF list, assigning sequential ids.
 * On failure every vNIC initialized so far is cleaned up in reverse
 * list order.
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Walk back from the failing entry, skipping non-data vNICs */
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}
301 
/* Allocate and initialize the app, and create its control vNIC when
 * the app needs one.  @qc_bar and @stride are forwarded to the control
 * vNIC allocation.  On failure pf->app is reset to NULL.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	/* App init/clean runs under pf->lock */
	mutex_lock(&pf->lock);
	err = nfp_app_init(pf->app);
	mutex_unlock(&pf->lock);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	/* needs_netdev=false: the control vNIC has no netdev */
	pf->ctrl_vnic =	nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
349 
/* Free the control vNIC (if one was created) and tear down the app.
 * Counterpart of nfp_net_pf_app_init().
 */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	/* App clean runs under pf->lock, matching nfp_app_init() */
	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);

	nfp_app_free(pf->app);
	pf->app = NULL;
}
364 
/* Init and open the control vNIC; no-op when the app did not create
 * one.  Cleans the vNIC back up if opening fails.
 */
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}
385 
386 static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
387 {
388 	if (!pf->ctrl_vnic)
389 		return;
390 	nfp_ctrl_close(pf->ctrl_vnic);
391 	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
392 }
393 
/* Start the app: bring up the control vNIC, start the app proper, and
 * re-enable SR-IOV in the app when VFs are already configured.
 * Unwinds on error.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
420 
/* Stop the app in reverse order of nfp_net_pf_app_start(): SR-IOV
 * first, then the app, then the control vNIC.
 */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
428 
/* Release the CPP areas mapped by nfp_net_pci_map_mem(), in reverse
 * order.  The first three are optional and may never have been mapped;
 * the QC and data vNIC BARs always exist if probe succeeded.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
440 
/* Map all the memory areas the PF needs.
 *
 * The data vNIC BAR and queue controller area are mandatory; MAC
 * stats, VF config and the second VF config table are optional —
 * -ENOENT from those lookups is tolerated and the corresponding
 * pointer left NULL.  On error everything mapped so far is released.
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	/* One 32K control BAR slice per data vNIC */
	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	if (pf->eth_tbl) {
		min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			/* Missing symbol is fine; other errors are fatal */
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		/* Optional — absent on FW without SR-IOV config support */
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		/* Optional as well */
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	/* Queue controller registers — always required */
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
			       NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}
516 
/* Refresh @port's cached ETH table entry from a freshly read table.
 *
 * Marks the port changed and returns -EIO when it disappeared from the
 * table; marks it NFP_PORT_INVALID when the FW reports its
 * configuration was overridden.  Caller must hold RTNL.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	/* Keep the cached copy in sync with the table we just read */
	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}
541 
/* Re-read the NSP ETH table and refresh the state of all ports,
 * resyncing representors and removing vNICs whose ports became
 * invalid.  Caller must hold pf->lock; RTNL is taken internally for
 * the port updates.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed — flag every eth port so a later refresh
		 * will retry.
		 */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}
593 
/* Workqueue handler: perform a port table refresh under pf->lock.
 * Scheduled by nfp_net_refresh_port_table().
 */
static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);

	mutex_lock(&pf->lock);
	nfp_net_refresh_port_table_sync(pf);
	mutex_unlock(&pf->lock);
}
603 
/* Mark @port changed and schedule an asynchronous refresh of the whole
 * port table on the PF workqueue.
 */
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}
612 
/* Synchronously re-read the ETH table and update a single port.
 *
 * The CHANGED bit is cleared up front and set again on read failure so
 * a later refresh will retry.  Returns 0 on success or a negative
 * errno.
 */
int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}
634 
635 /*
636  * PCI device functions
637  */
/* Probe-time network setup for the PF.
 *
 * Validates firmware presence and ABI, maps the device memory areas,
 * initializes the app, registers with devlink, then allocates IRQs and
 * vNICs and starts everything.  On any failure the goto chain unwinds
 * all prior steps in reverse.  Returns 0 on success, negative errno
 * otherwise.
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	/* A negative value here is an errno from the rtsym read */
	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = devlink_register(devlink, &pf->pdev->dev);
	if (err)
		goto err_app_clean;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	/* vNIC/IRQ setup below runs under pf->lock */
	mutex_lock(&pf->lock);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	mutex_unlock(&pf->lock);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	mutex_unlock(&pf->lock);
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	/* The refresh work may have been scheduled by now */
	cancel_work_sync(&pf->port_refresh_work);
	devlink_unregister(devlink);
err_app_clean:
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}
750 
/* Remove-time teardown, reversing nfp_net_pci_probe().
 *
 * All data vNICs are cleaned and freed under pf->lock (emptying
 * pf->vnics also signals any concurrent port table refresh to bail
 * out), then app, devlink, IRQ and memory-mapping state are released.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	mutex_lock(&pf->lock);
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	mutex_unlock(&pf->lock);

	nfp_shared_buf_unregister(pf);
	devlink_unregister(priv_to_devlink(pf));

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	/* No more refresh work may run after this point */
	cancel_work_sync(&pf->port_refresh_work);
}
778