/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

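/* Each PF port owns a fixed 32K slice of the PF control BAR (see
 * nfp_net_pf_map_ctrl_bar() and nfp_net_pf_total_qcs() below).
 */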
#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

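	/* A board.state of 15 indicates the board has completed
	 * initialization and is ready for NIC operation.
	 */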
	return state == 15;
}

/**
 * nfp_net_map_area() - Helper function to map an area
 * @cpp:    NFP CPP handle
 * @name:   Name for the area
 * @isl:    CPP island
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function is primarily here to simplify the code in the main probe
 * function. To undo the effect of this function, call
 * nfp_cpp_area_release_free(*area).
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}
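
/* Typical usage, as in nfp_net_pci_probe() below (sketch; bar/isl/target/
 * addr/size are placeholders):
 *
 *	bar = nfp_net_map_area(cpp, "net.ctrl", isl, target, addr, size,
 *			       &pf->ctrl_area);
 *	if (IS_ERR(bar))
 *		return PTR_ERR(bar);
 *	...
 *	nfp_cpp_area_release_free(pf->ctrl_area);
 */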

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @nn:       NFP Network structure
 * @cpp:      NFP CPP handle
 * @id:       NFP port id
 *
 * First try to get the MAC address from the NSP ETH table. If that
 * fails, try HWInfo.  As a last resort, generate a random address.
 */
static void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
	struct nfp_net_dp *dp = &nn->dp;
	u8 mac_addr[ETH_ALEN];
	const char *mac_str;
	char name[32];

	if (nn->eth_port) {
		ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
		ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
		return;
	}

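	/* No NSP ETH table entry for this port - fall back to the
	 * "eth%d.mac" HWInfo key.
	 */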
	snprintf(name, sizeof(name), "eth%d.mac", id);

	mac_str = nfp_hwinfo_lookup(cpp, name);
	if (!mac_str) {
		dev_warn(dp->dev, "Can't lookup MAC address. Generating a random one\n");
		eth_hw_addr_random(dp->netdev);
		return;
	}

	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		dev_warn(dp->dev,
			 "Can't parse MAC address (%s). Generating a random one\n",
			 mac_str);
		eth_hw_addr_random(dp->netdev);
		return;
	}

	ether_addr_copy(dp->netdev->dev_addr, mac_addr);
	ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}

static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_pf *pf, unsigned int id)
{
	int i;

	for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
		if (pf->eth_tbl->ports[i].eth_index == id)
			return &pf->eth_tbl->ports[i];

	return NULL;
}

static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	char name[256];
	u16 interface;
	int pcie_pf;
	int err = 0;
	u64 val;

	interface = nfp_cpp_interface(pf->cpp);
	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);

	snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);

	val = nfp_rtsym_read_le(pf->cpp, name, &err);
	/* Default to one port; -ENOENT only means the firmware does not
	 * export a port count symbol.
	 */
	if (err) {
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read adapter port count\n");
		val = 1;
	}

	return val;
}

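/* Walk the 32K control BAR slice of every port and compute the total span
 * of queue controller (QC) structures which needs to be mapped.
 */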
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler only accept configuration where
		 * queues are allocated to PFs in order (queues of PFn all have
		 * indexes lower than PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}

static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
	const struct nfp_rtsym *ctrl_sym;
	u8 __iomem *ctrl_bar;
	char pf_symbol[256];
	u16 interface;
	int pcie_pf;

	interface = nfp_cpp_interface(pf->cpp);
	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);

	snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);

	ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
	if (!ctrl_sym) {
		dev_err(&pf->pdev->dev,
			"Failed to find PF BAR0 symbol %s\n", pf_symbol);
		return NULL;
	}

	if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
		dev_err(&pf->pdev->dev,
			"PF BAR0 too small to contain %d ports\n",
			pf->num_ports);
		return NULL;
	}

	ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
				    ctrl_sym->domain, ctrl_sym->target,
				    ctrl_sym->addr, ctrl_sym->size,
				    &pf->ctrl_area);
	if (IS_ERR(ctrl_bar)) {
		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
			PTR_ERR(ctrl_bar));
		return NULL;
	}

	return ctrl_bar;
}

static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	while (!list_empty(&pf->ports)) {
		nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
		list_del(&nn->port_list);
		pf->num_netdevs--;

		nfp_net_netdev_free(nn);
	}
}

static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
			     void __iomem *tx_bar, void __iomem *rx_bar,
			     int stride, struct nfp_net_fw_version *fw_ver,
			     struct nfp_eth_table_port *eth_port)
{
	u32 n_tx_rings, n_rx_rings;
	struct nfp_net *nn;

	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the netdev */
	nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->cpp = pf->cpp;
	nn->fw_ver = *fw_ver;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->tx_bar = tx_bar;
	nn->rx_bar = rx_bar;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;
	nn->eth_port = eth_port;

	return nn;
}

static int
nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
			    unsigned int id)
{
	int err;

	/* Get MAC address */
	nfp_net_get_mac_addr(nn, pf->cpp, id);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_netdev_init(nn->dp.netdev);
	if (err)
		return err;

	nfp_net_debugfs_port_add(nn, pf->ddir, id);

	nfp_net_info(nn);

	return 0;
}

static int
nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
			 void __iomem *tx_bar, void __iomem *rx_bar,
			 int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
	struct nfp_eth_table_port *eth_port;
	struct nfp_net *nn;
	unsigned int i;
	int err;

	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);

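	/* Queues are assigned to ports in order (enforced by
	 * nfp_net_pf_total_qcs()), so each port's BAR pointers can simply
	 * be advanced by the offset of its first queue from the previous
	 * port's.
	 */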
	for (i = 0; i < pf->num_ports; i++) {
		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		prev_tx_base = tgt_tx_base;
		prev_rx_base = tgt_rx_base;

		eth_port = nfp_net_find_port(pf, i);
		if (eth_port && eth_port->override_changed) {
			nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
		} else {
			nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
							  rx_bar, stride,
							  fw_ver, eth_port);
			if (IS_ERR(nn)) {
				err = PTR_ERR(nn);
				goto err_free_prev;
			}
			list_add_tail(&nn->port_list, &pf->ports);
			pf->num_netdevs++;
		}

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	if (list_empty(&pf->ports))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_netdevs(pf);
	return err;
}

static int
nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
			 void __iomem *ctrl_bar, void __iomem *tx_bar,
			 void __iomem *rx_bar, int stride,
			 struct nfp_net_fw_version *fw_ver)
{
	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
	struct nfp_net *nn;
	int err;

	/* Allocate the netdevs and do basic init */
	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, fw_ver);
	if (err)
		return err;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->ports, port_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries) {
		err = -ENOMEM;
		goto err_nn_free;
	}

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
				      wanted_irqs);
	if (!num_irqs) {
		/* nn is no longer a valid list entry here, warn against
		 * the device instead.
		 */
		nfp_warn(pf->cpp, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -ENOMEM;
		goto err_vec_free;
	}

	/* Distribute IRQs to ports */
	irqs_left = num_irqs;
	ports_left = pf->num_netdevs;
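	/* Hand each port DIV_ROUND_UP(irqs_left, ports_left) vectors so the
	 * available vectors are spread as evenly as possible.
	 */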
	list_for_each_entry(nn, &pf->ports, port_list) {
		unsigned int n;

		n = DIV_ROUND_UP(irqs_left, ports_left);
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		ports_left--;
	}

	/* Finish netdev init and register */
	id = 0;
	list_for_each_entry(nn, &pf->ports, port_list) {
		err = nfp_net_pf_init_port_netdev(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->dp.netdev);
	}
	nfp_net_irqs_disable(pf->pdev);
err_vec_free:
	kfree(pf->irq_entries);
err_nn_free:
	nfp_net_pf_free_netdevs(pf);
	return err;
}

static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
}

static void nfp_net_refresh_netdevs(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct nfp_net *nn, *next;

	mutex_lock(&pf->port_lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
		if (!nn->eth_port) {
			/* nn->eth_port is NULL here - don't dereference it */
			nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
			continue;
		}
		if (!nn->eth_port->override_changed)
			continue;

		nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");

		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->dp.netdev);

		list_del(&nn->port_list);
		pf->num_netdevs--;
		nfp_net_netdev_free(nn);
	}

	if (list_empty(&pf->ports))
		nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->port_lock);
}

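/* Re-sync port state with the NSP ETH table.  Must be called with the
 * RTNL lock held; ports whose configuration changed on the NSP side are
 * torn down asynchronously by nfp_net_refresh_netdevs().
 */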
void nfp_net_refresh_port_config(struct nfp_net *nn)
{
	struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
	struct nfp_eth_table *old_table;

	ASSERT_RTNL();

	old_table = pf->eth_tbl;

	list_for_each_entry(nn, &pf->ports, port_list)
		nfp_net_link_changed_read_clear(nn);

	pf->eth_tbl = nfp_eth_read_ports(pf->cpp);
	if (!pf->eth_tbl) {
		pf->eth_tbl = old_table;
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return;
	}

	/* A port may have lost its eth_table entry on an earlier refresh -
	 * don't dereference nn->eth_port for those.
	 */
	list_for_each_entry(nn, &pf->ports, port_list)
		if (nn->eth_port)
			nn->eth_port = nfp_net_find_port(pf,
							 nn->eth_port->eth_index);

	kfree(old_table);

	schedule_work(&pf->port_refresh_work);
}

/*
 * PCI device functions
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
	u32 total_tx_qcs, total_rx_qcs;
	struct nfp_net_fw_version fw_ver;
	u32 tx_area_sz, rx_area_sz;
	u32 start_q;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
	mutex_init(&pf->port_lock);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	mutex_lock(&pf->port_lock);
	pf->num_ports = nfp_net_pf_get_num_ports(pf);

	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
	if (!ctrl_bar) {
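		/* If we loaded the firmware ourselves a missing BAR0 symbol
		 * is a hard error; otherwise defer the probe in the hope
		 * that firmware gets loaded later.
		 */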
		err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
		goto err_unlock;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find how many QC structs need to be mapped */
	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_TXQ,
					    NFP_NET_CFG_MAX_TXRINGS);
	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_RXQ,
					    NFP_NET_CFG_MAX_RXRINGS);
	if (!total_tx_qcs || !total_rx_qcs) {
		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
			total_tx_qcs, total_rx_qcs);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;

	/* Map TX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  tx_area_sz, &pf->tx_area);
	if (IS_ERR(tx_bar)) {
		nfp_err(pf->cpp, "Failed to map TX area.\n");
		err = PTR_ERR(tx_bar);
		goto err_ctrl_unmap;
	}

	/* Map RX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  rx_area_sz, &pf->rx_area);
	if (IS_ERR(rx_bar)) {
		nfp_err(pf->cpp, "Failed to map RX area.\n");
		err = PTR_ERR(rx_bar);
		goto err_unmap_tx;
	}

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, &fw_ver);
	if (err)
		goto err_clean_ddir;

	mutex_unlock(&pf->port_lock);

	return 0;

err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
	nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->ctrl_area);
err_unlock:
	mutex_unlock(&pf->port_lock);
	return err;
}

void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->port_lock);
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->dp.netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->port_lock);

	cancel_work_sync(&pf->port_refresh_work);
}
689