/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI weight: maximum packets processed per receive poll.");
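
/*
 * Usage sketch (illustrative values): loading the driver with a larger
 * packet buffer pool and a different receive group, assuming the module
 * is built as "octeon-ethernet" per KBUILD_MODNAME:
 *
 *   modprobe octeon-ethernet num_packet_buffers=2048 pow_receive_group=13
 */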
/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before the driver's delayed work items are
 * cancelled during module removal.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it.  If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

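	/*
	 * Enable RED dropping on FPA 0. With the default 1024 packet
	 * buffers this begins dropping when fewer than 256 buffers are
	 * free and drops all incoming packets below 128 free (the
	 * helper's pass and drop thresholds, respectively).
	 */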
	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
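
/*
 * Usage sketch (hypothetical consumer): kernel code that pulls work
 * from the POW and hands an unwanted entry back to this driver, which
 * frees its packet buffers and the WQE. wanted() is a hypothetical
 * predicate.
 *
 *   cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *
 *   if (work && !wanted(work))
 *           cvm_oct_free_work(work);
 */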

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		priv->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64) ||
	    (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
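
	/*
	 * Worked example of the check above: with VLAN support enabled
	 * (vlan_bytes = 4) the packet size bounds translate to an MTU
	 * range of 64 - 14 - 4 - 4 = 42 through
	 * 65392 - 14 - 4 - 4 = 65370 bytes.
	 */
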
	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject CAM matches when in promiscuous mode.
			 * Since the CAM is shut off below, nothing
			 * matches and everything is accepted.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];
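		/* e.g. dev_addr 00:12:34:56:78:9a yields mac = 0x00123456789a */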

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to transmit via the POW if always_use_pow
	 * was specified or the device is in the pow_send_list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}
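
/*
 * Sketch of the device tree layout the two helpers above walk. Node
 * names and "reg" values are illustrative only, following the
 * "cavium,octeon-3860-pip" binding matched below:
 *
 *   pip {
 *           interface@0 {
 *                   reg = <0>;
 *                   ethernet@0 { reg = <0>; };
 *                   ethernet@1 { reg = <1>; };
 *           };
 *   };
 */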

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 us: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
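	/* e.g. with a hypothetical 800 MHz clock: 150 * 800 = 120000 cycles */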

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");