/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/*
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

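/**
 * cvm_oct_rx_refill_worker - periodically refill the packet buffer pool
 * @work: Work queue entry (unused)
 *
 * Reschedules itself once per second until the driver is shutting down.
 */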
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it.  If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}

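/**
 * cvm_oct_periodic_worker - per-port periodic maintenance
 * @work: Work queue entry embedded in struct octeon_ethernet
 *
 * Runs the port's link poll routine (if any) and refreshes its
 * statistics, then reschedules itself once per second.
 */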
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
						&priv->port_periodic_work, HZ);
}

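/**
 * cvm_oct_configure_common_hw - set up hardware shared by all ports
 *
 * Enables the FPA, fills the packet, WQE and output buffer pools,
 * configures IPD for little-endian packet/WQE data when the kernel is
 * little endian, and sets RED thresholds based on num_packet_buffers.
 */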
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			     (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

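/**
 * cvm_oct_set_mac_filter - program the GMX MAC address filter
 * @dev:    Device whose dev_addr should be written to the hardware
 *
 * Writes the station MAC and the CAM entries for the port, then
 * reapplies the multicast settings. Only GMX ports (interface 0 or 1,
 * non-SPI) are programmed.
 *
 * Returns Zero on success
 */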
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

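/**
 * cvm_oct_common_uninit - per network device uninitialization
 * @dev:    Device being uninitialized
 *
 * Disconnects the attached PHY, if one was connected.
 */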
void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

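/**
 * cvm_oct_common_open - open the device for traffic
 * @dev:       Device to open
 * @link_poll: Polling function used to track link state when no PHY
 *             is attached
 *
 * Enables the GMX port and sets the initial carrier state from the
 * PHY or the helper link status.
 *
 * Returns Zero on success
 */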
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

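/**
 * cvm_oct_link_poll - check for link state changes
 * @dev:    Device to poll
 *
 * Reads the current link state and, if it changed, renegotiates the
 * link and updates the carrier state.
 */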
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

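/**
 * cvm_oct_xaui_open - open an XAUI port using polled link status
 * @dev:    Device to open
 *
 * Returns Zero on success
 */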
static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

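/**
 * cvm_oct_of_get_child - find a child node by its "reg" property
 * @parent:  Device tree node to search
 * @reg_val: Value the child's "reg" property must match
 *
 * Returns the matching child with its reference count incremented,
 * or NULL if none is found.
 */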
static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

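/**
 * cvm_oct_node_for_port - look up the device tree node for a port
 * @pip:       The pip device tree node
 * @interface: Interface number
 * @port:      Port index within the interface
 */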
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
							int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

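/**
 * cvm_oct_probe - initialize packet I/O and register the net devices
 * @pdev:   Platform device for the pip node
 *
 * Returns Zero on success, negative on failure.
 */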
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (!cvm_oct_poll_queue) {
		pr_err("octeon-ethernet: Cannot create workqueue\n");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
								port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
					 interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				queue_delayed_work(cvm_oct_poll_queue,
						&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 us: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}

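/**
 * cvm_oct_remove - tear down packet I/O and unregister the net devices
 * @pdev:   Platform device for the pip node
 *
 * Returns Zero on success
 */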
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");