1 /*
2  * Copyright (C) 2015-2017 Netronome Systems, Inc.
3  *
 * This software is dual licensed under the GNU General Public License
 * Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * nfp_net_main.c
36  * Netronome network device driver: Main entry point
37  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38  *          Alejandro Lucero <alejandro.lucero@netronome.com>
39  *          Jason McMullan <jason.mcmullan@netronome.com>
40  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
41  */
42 
43 #include <linux/etherdevice.h>
44 #include <linux/kernel.h>
45 #include <linux/init.h>
46 #include <linux/pci.h>
47 #include <linux/pci_regs.h>
48 #include <linux/msi.h>
49 #include <linux/random.h>
50 
51 #include "nfpcore/nfp.h"
52 #include "nfpcore/nfp_cpp.h"
53 #include "nfpcore/nfp_nffw.h"
54 #include "nfpcore/nfp_nsp_eth.h"
55 #include "nfpcore/nfp6000_pcie.h"
56 
57 #include "nfp_net_ctrl.h"
58 #include "nfp_net.h"
59 #include "nfp_main.h"
60 
61 #define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
62 
/* Check whether the board has finished initialization.
 *
 * Reads the "board.state" HWInfo key and treats a value of 15 as "ready".
 * A missing or unparseable key is reported as not ready.
 */
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *board_state;
	long val;

	board_state = nfp_hwinfo_lookup(cpp, "board.state");
	if (!board_state)
		return 0;

	if (kstrtol(board_state, 0, &val) < 0)
		return 0;

	return val == 15;
}
79 
80 /**
81  * nfp_net_map_area() - Help function to map an area
82  * @cpp:    NFP CPP handler
83  * @name:   Name for the area
84  * @target: CPP target
85  * @addr:   CPP address
86  * @size:   Size of the area
87  * @area:   Area handle (returned).
88  *
89  * This function is primarily to simplify the code in the main probe
90  * function. To undo the effect of this functions call
91  * @nfp_cpp_area_release_free(*area);
92  *
93  * Return: Pointer to memory mapped area or ERR_PTR
94  */
95 static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
96 				    const char *name, int isl, int target,
97 				    unsigned long long addr, unsigned long size,
98 				    struct nfp_cpp_area **area)
99 {
100 	u8 __iomem *res;
101 	u32 dest;
102 	int err;
103 
104 	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);
105 
106 	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
107 	if (!*area) {
108 		err = -EIO;
109 		goto err_area;
110 	}
111 
112 	err = nfp_cpp_area_acquire(*area);
113 	if (err < 0)
114 		goto err_acquire;
115 
116 	res = nfp_cpp_area_iomem(*area);
117 	if (!res) {
118 		err = -EIO;
119 		goto err_map;
120 	}
121 
122 	return res;
123 
124 err_map:
125 	nfp_cpp_area_release(*area);
126 err_acquire:
127 	nfp_cpp_area_free(*area);
128 err_area:
129 	return (u8 __iomem *)ERR_PTR(err);
130 }
131 
132 static void
133 nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
134 			    unsigned int id)
135 {
136 	u8 mac_addr[ETH_ALEN];
137 	const char *mac_str;
138 	char name[32];
139 
140 	snprintf(name, sizeof(name), "eth%d.mac", id);
141 
142 	mac_str = nfp_hwinfo_lookup(cpp, name);
143 	if (!mac_str) {
144 		dev_warn(&nn->pdev->dev,
145 			 "Can't lookup MAC address. Generate\n");
146 		eth_hw_addr_random(nn->netdev);
147 		return;
148 	}
149 
150 	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
151 		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
152 		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
153 		dev_warn(&nn->pdev->dev,
154 			 "Can't parse MAC address (%s). Generate.\n", mac_str);
155 		eth_hw_addr_random(nn->netdev);
156 		return;
157 	}
158 
159 	ether_addr_copy(nn->netdev->dev_addr, mac_addr);
160 	ether_addr_copy(nn->netdev->perm_addr, mac_addr);
161 }
162 
163 /**
164  * nfp_net_get_mac_addr() - Get the MAC address.
165  * @nn:       NFP Network structure
166  * @pf:	      NFP PF device structure
167  * @id:	      NFP port id
168  *
169  * First try to get the MAC address from NSP ETH table. If that
170  * fails try HWInfo.  As a last resort generate a random address.
171  */
172 static void
173 nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
174 {
175 	int i;
176 
177 	for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
178 		if (pf->eth_tbl->ports[i].eth_index == id) {
179 			const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
180 
181 			ether_addr_copy(nn->netdev->dev_addr, mac_addr);
182 			ether_addr_copy(nn->netdev->perm_addr, mac_addr);
183 			return;
184 		}
185 
186 	nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
187 }
188 
189 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
190 {
191 	char name[256];
192 	u16 interface;
193 	int pcie_pf;
194 	int err = 0;
195 	u64 val;
196 
197 	interface = nfp_cpp_interface(pf->cpp);
198 	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
199 
200 	snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
201 
202 	val = nfp_rtsym_read_le(pf->cpp, name, &err);
203 	/* Default to one port */
204 	if (err) {
205 		if (err != -ENOENT)
206 			nfp_err(pf->cpp, "Unable to read adapter port count\n");
207 		val = 1;
208 	}
209 
210 	return val;
211 }
212 
/* Compute how many queue controller (QC) address units the PF's ports
 * occupy in total.
 *
 * @ctrl_bar points at the first port's control BAR slice; each following
 * port's slice sits NFP_PF_CSR_SLICE_SIZE further on.  @start_off and
 * @num_off are the in-slice offsets of the first-queue index and the ring
 * count, @stride is the per-ring QC stride.
 *
 * Returns the span from the lowest queue to just past the highest one,
 * or 0 if the firmware's per-port queue layout is not monotonically
 * increasing (which this code refuses to handle).
 */
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	/* First port's start queue is the lowest index overall */
	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler only accept configuration where
		 * queues are allocated to PFs in order (queues of PFn all have
		 * indexes lower than PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}
237 
238 static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
239 {
240 	const struct nfp_rtsym *ctrl_sym;
241 	u8 __iomem *ctrl_bar;
242 	char pf_symbol[256];
243 	u16 interface;
244 	int pcie_pf;
245 
246 	interface = nfp_cpp_interface(pf->cpp);
247 	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
248 
249 	snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
250 
251 	ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
252 	if (!ctrl_sym) {
253 		dev_err(&pf->pdev->dev,
254 			"Failed to find PF BAR0 symbol %s\n", pf_symbol);
255 		return NULL;
256 	}
257 
258 	if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
259 		dev_err(&pf->pdev->dev,
260 			"PF BAR0 too small to contain %d ports\n",
261 			pf->num_ports);
262 		return NULL;
263 	}
264 
265 	ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
266 				    ctrl_sym->domain, ctrl_sym->target,
267 				    ctrl_sym->addr, ctrl_sym->size,
268 				    &pf->ctrl_area);
269 	if (IS_ERR(ctrl_bar)) {
270 		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
271 			PTR_ERR(ctrl_bar));
272 		return NULL;
273 	}
274 
275 	return ctrl_bar;
276 }
277 
278 static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
279 {
280 	struct nfp_net *nn;
281 
282 	while (!list_empty(&pf->ports)) {
283 		nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
284 		list_del(&nn->port_list);
285 
286 		nfp_net_netdev_free(nn);
287 	}
288 }
289 
290 static struct nfp_net *
291 nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
292 			     void __iomem *tx_bar, void __iomem *rx_bar,
293 			     int stride, struct nfp_net_fw_version *fw_ver)
294 {
295 	u32 n_tx_rings, n_rx_rings;
296 	struct nfp_net *nn;
297 
298 	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
299 	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
300 
301 	/* Allocate and initialise the netdev */
302 	nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
303 	if (IS_ERR(nn))
304 		return nn;
305 
306 	nn->cpp = pf->cpp;
307 	nn->fw_ver = *fw_ver;
308 	nn->ctrl_bar = ctrl_bar;
309 	nn->tx_bar = tx_bar;
310 	nn->rx_bar = rx_bar;
311 	nn->is_vf = 0;
312 	nn->stride_rx = stride;
313 	nn->stride_tx = stride;
314 
315 	return nn;
316 }
317 
/* Finish initialization of a single port's netdev and register it.
 *
 * Assigns the MAC address, sets the ME clock frequency, registers the
 * netdev with the networking core and adds its debugfs entry.
 * Returns 0 on success, negative errno from netdev registration.
 */
static int
nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
			    unsigned int id)
{
	int err;

	/* Get MAC address */
	nfp_net_get_mac_addr(nn, pf, id);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_netdev_init(nn->netdev);
	if (err)
		return err;

	nfp_net_debugfs_port_add(nn, pf->ddir, id);

	nfp_net_info(nn);

	return 0;
}
343 
/* Allocate one nfp_net per port and link each into pf->ports.
 *
 * Each port is handed its own control BAR slice and views of the TX/RX
 * queue BARs offset so the port's first queue lands at its bar pointer.
 * On failure everything allocated so far is freed.
 */
static int
nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
			 void __iomem *tx_bar, void __iomem *rx_bar,
			 int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
	struct nfp_net *nn;
	unsigned int i;
	int err;

	/* Baseline: the first port's starting queue indexes */
	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);

	for (i = 0; i < pf->num_ports; i++) {
		/* Advance the queue BAR pointers by the distance (in queue
		 * units) between this port's first queue and the previous
		 * port's first queue.
		 */
		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		prev_tx_base = tgt_tx_base;
		prev_rx_base = tgt_rx_base;

		nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
						  stride, fw_ver);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}
		list_add_tail(&nn->port_list, &pf->ports);

		/* Next port's control registers live in the next BAR slice */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return 0;

err_free_prev:
	nfp_net_pf_free_netdevs(pf);
	return err;
}
382 
383 static int
384 nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
385 			 void __iomem *ctrl_bar, void __iomem *tx_bar,
386 			 void __iomem *rx_bar, int stride,
387 			 struct nfp_net_fw_version *fw_ver)
388 {
389 	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
390 	struct nfp_net *nn;
391 	int err;
392 
393 	/* Allocate the netdevs and do basic init */
394 	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
395 				       stride, fw_ver);
396 	if (err)
397 		return err;
398 
399 	/* Get MSI-X vectors */
400 	wanted_irqs = 0;
401 	list_for_each_entry(nn, &pf->ports, port_list)
402 		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
403 	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
404 				  GFP_KERNEL);
405 	if (!pf->irq_entries) {
406 		err = -ENOMEM;
407 		goto err_nn_free;
408 	}
409 
410 	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
411 				      NFP_NET_MIN_PORT_IRQS * pf->num_ports,
412 				      wanted_irqs);
413 	if (!num_irqs) {
414 		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
415 		err = -ENOMEM;
416 		goto err_vec_free;
417 	}
418 
419 	/* Distribute IRQs to ports */
420 	irqs_left = num_irqs;
421 	ports_left = pf->num_ports;
422 	list_for_each_entry(nn, &pf->ports, port_list) {
423 		unsigned int n;
424 
425 		n = DIV_ROUND_UP(irqs_left, ports_left);
426 		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
427 				    n);
428 		irqs_left -= n;
429 		ports_left--;
430 	}
431 
432 	/* Finish netdev init and register */
433 	id = 0;
434 	list_for_each_entry(nn, &pf->ports, port_list) {
435 		err = nfp_net_pf_init_port_netdev(pf, nn, id);
436 		if (err)
437 			goto err_prev_deinit;
438 
439 		id++;
440 	}
441 
442 	return 0;
443 
444 err_prev_deinit:
445 	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
446 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
447 		nfp_net_netdev_clean(nn->netdev);
448 	}
449 	nfp_net_irqs_disable(pf->pdev);
450 err_vec_free:
451 	kfree(pf->irq_entries);
452 err_nn_free:
453 	nfp_net_pf_free_netdevs(pf);
454 	return err;
455 }
456 
457 /*
458  * PCI device functions
459  */
460 int nfp_net_pci_probe(struct nfp_pf *pf)
461 {
462 	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
463 	u32 total_tx_qcs, total_rx_qcs;
464 	struct nfp_net_fw_version fw_ver;
465 	u32 tx_area_sz, rx_area_sz;
466 	u32 start_q;
467 	int stride;
468 	int err;
469 
470 	/* Verify that the board has completed initialization */
471 	if (!nfp_is_ready(pf->cpp)) {
472 		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
473 		return -EINVAL;
474 	}
475 
476 	pf->num_ports = nfp_net_pf_get_num_ports(pf);
477 
478 	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
479 	if (!ctrl_bar)
480 		return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
481 
482 	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
483 	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
484 		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
485 			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
486 		err = -EINVAL;
487 		goto err_ctrl_unmap;
488 	}
489 
490 	/* Determine stride */
491 	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
492 		stride = 2;
493 		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
494 	} else {
495 		switch (fw_ver.major) {
496 		case 1 ... 4:
497 			stride = 4;
498 			break;
499 		default:
500 			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
501 				fw_ver.resv, fw_ver.class,
502 				fw_ver.major, fw_ver.minor);
503 			err = -EINVAL;
504 			goto err_ctrl_unmap;
505 		}
506 	}
507 
508 	/* Find how many QC structs need to be mapped */
509 	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
510 					    NFP_NET_CFG_START_TXQ,
511 					    NFP_NET_CFG_MAX_TXRINGS);
512 	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
513 					    NFP_NET_CFG_START_RXQ,
514 					    NFP_NET_CFG_MAX_RXRINGS);
515 	if (!total_tx_qcs || !total_rx_qcs) {
516 		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
517 			total_tx_qcs, total_rx_qcs);
518 		err = -EINVAL;
519 		goto err_ctrl_unmap;
520 	}
521 
522 	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
523 	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
524 
525 	/* Map TX queues */
526 	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
527 	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
528 				  NFP_PCIE_QUEUE(start_q),
529 				  tx_area_sz, &pf->tx_area);
530 	if (IS_ERR(tx_bar)) {
531 		nfp_err(pf->cpp, "Failed to map TX area.\n");
532 		err = PTR_ERR(tx_bar);
533 		goto err_ctrl_unmap;
534 	}
535 
536 	/* Map RX queues */
537 	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
538 	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
539 				  NFP_PCIE_QUEUE(start_q),
540 				  rx_area_sz, &pf->rx_area);
541 	if (IS_ERR(rx_bar)) {
542 		nfp_err(pf->cpp, "Failed to map RX area.\n");
543 		err = PTR_ERR(rx_bar);
544 		goto err_unmap_tx;
545 	}
546 
547 	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
548 
549 	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
550 				       stride, &fw_ver);
551 	if (err)
552 		goto err_clean_ddir;
553 
554 	return 0;
555 
556 err_clean_ddir:
557 	nfp_net_debugfs_dir_clean(&pf->ddir);
558 	nfp_cpp_area_release_free(pf->rx_area);
559 err_unmap_tx:
560 	nfp_cpp_area_release_free(pf->tx_area);
561 err_ctrl_unmap:
562 	nfp_cpp_area_release_free(pf->ctrl_area);
563 	return err;
564 }
565 
/**
 * nfp_net_pci_remove() - Tear down networking on a PF device
 * @pf:	NFP PF device structure
 *
 * Unwinds everything nfp_net_pci_probe() set up, in reverse order:
 * unregister and free the port netdevs, remove debugfs entries, release
 * the MSI-X vectors and unmap the CPP areas.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	/* Unregister each port's netdev while it is still on the list */
	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
}
587