// SPDX-License-Identifier: GPL-2.0-only
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_reg.h"

/* MAC core initialization */
static void sxgbe_core_init(void __iomem *ioaddr)
{
	u32 regval;

	/* TX configuration */
	regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
	/* Other configurable parameters (IFP, IPG, ISR, ISM)
	 * need to be set here if required.
	 */
	regval |= SXGBE_TX_JABBER_DISABLE;
	writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);

	/* RX configuration */
	regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	/* Other configurable parameters (CST, SPEN, USP, GPSLCE,
	 * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN) need to be set
	 * here if required.
	 */
	regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
	writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
}

/* Dump MAC registers */
static void sxgbe_core_dump_regs(void __iomem *ioaddr)
{
}

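/* Translate the LPI interrupt status bits into the driver's TX/RX LPI
 * entry/exit flags. Reading the control/status register below also clears
 * the LPI status bits.
 */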
static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status)
{
	int status = 0;
	int lpi_status;

	/* Reading this register shall clear all the LPI status bits */
	lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);

	if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
		status |= TX_ENTRY_LPI_MODE;
	if (lpi_status & LPI_CTRL_STATUS_TLPIEX)
		status |= TX_EXIT_LPI_MODE;
	if (lpi_status & LPI_CTRL_STATUS_RLPIEN)
		status |= RX_ENTRY_LPI_MODE;
	if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
		status |= RX_EXIT_LPI_MODE;

	return status;
}

/* Handle extra, hardware-dependent events signalled on specific interrupts */
static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
				      struct sxgbe_extra_stats *x)
{
	int irq_status, status = 0;

	irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG);

	if (unlikely(irq_status & LPI_INT_STATUS))
		status |= sxgbe_get_lpi_status(ioaddr, irq_status);

	return status;
}

/* Set power management mode (e.g. magic frame) */
static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
{
}

/* Set/Get Unicast MAC addresses */
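/* The address is packed little-endian across two registers: bytes 0-3 go to
 * the low register and bytes 4-5 to the high register. For a hypothetical
 * example address 00:11:22:33:44:55, low_word = 0x33221100 and
 * high_word = 0x00005544.
 */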
static void sxgbe_core_set_umac_addr(void __iomem *ioaddr,
				     const unsigned char *addr,
				     unsigned int reg_n)
{
	u32 high_word, low_word;

	high_word = (addr[5] << 8) | (addr[4]);
	low_word = (addr[3] << 24) | (addr[2] << 16) |
		   (addr[1] << 8) | (addr[0]);
	writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
	writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
}

static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
				     unsigned int reg_n)
{
	u32 high_word, low_word;

	high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
	low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));

	/* extract and assign address */
	addr[5] = (high_word & 0x0000FF00) >> 8;
	addr[4] = (high_word & 0x000000FF);
	addr[3] = (low_word & 0xFF000000) >> 24;
	addr[2] = (low_word & 0x00FF0000) >> 16;
	addr[1] = (low_word & 0x0000FF00) >> 8;
	addr[0] = (low_word & 0x000000FF);
}

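/* Enable or disable the MAC transmit and receive paths */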
static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
	tx_config &= ~SXGBE_TX_ENABLE;

	if (enable)
		tx_config |= SXGBE_TX_ENABLE;
	writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
}

static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
{
	u32 rx_config;

	rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	rx_config &= ~SXGBE_RX_ENABLE;

	if (enable)
		rx_config |= SXGBE_RX_ENABLE;
	writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
}

static int sxgbe_get_controller_version(void __iomem *ioaddr)
{
	return readl(ioaddr + SXGBE_CORE_VERSION_REG);
}

/* If supported, read the optional core feature registers */
static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
					 unsigned char feature_index)
{
	return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
}

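/* Program the MAC speed field in the TX configuration register. The speed
 * value is assumed to be one of the hardware speed encodings, shifted into
 * bits 30:29 (mask 0x60000000) by SXGBE_SPEED_LSHIFT.
 */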
static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
{
	u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);

	/* clear the speed bits */
	tx_cfg &= ~0x60000000;
	tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);

	/* set the speed */
	writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
}

static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
	reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
	reg_val |= SXGBE_CORE_RXQ_ENABLE;
	writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
}

static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
	reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
	reg_val |= SXGBE_CORE_RXQ_DISABLE;
	writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
}

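/* Rough usage sketch (hypothetical call sites, not taken from this file):
 * the EEE setup path is expected to program the LPI timers first and then
 * enable LPI, e.g.
 *
 *	core_ops->set_eee_timer(ioaddr, ls_ms, tw_us);
 *	core_ops->set_eee_mode(ioaddr);
 *
 * while the link-change path would call set_eee_pls() with the current PHY
 * link state, and reset_eee_mode() tears LPI back down.
 */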
static void sxgbe_set_eee_mode(void __iomem *ioaddr)
{
	u32 ctrl;

	/* Enable the LPI mode for transmit path with Tx automate bit set.
	 * When Tx Automate bit is set, MAC internally handles the entry
	 * to LPI mode after all outstanding and pending packets are
	 * transmitted.
	 */
	ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
	ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA;
	writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
}

static void sxgbe_reset_eee_mode(void __iomem *ioaddr)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
	ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA);
	writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
}

static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);

	/* If the PHY link status is UP then set PLS */
	if (link)
		ctrl |= LPI_CTRL_STATUS_PLS;
	else
		ctrl &= ~LPI_CTRL_STATUS_PLS;

	writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
}

static void sxgbe_set_eee_timer(void __iomem *ioaddr,
				const int ls, const int tw)
{
	int value = (tw & 0xffff) | ((ls & 0x7ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
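	/* Worked example with hypothetical values: ls = 1000 ms and tw = 30 us
	 * pack to (1000 << 16) | 30 = 0x03e8001e.
	 */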
	writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
}

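/* Enable or disable hardware RX checksum offload in the RX configuration
 * register.
 */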
static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
	writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
}

static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
	writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
}

static const struct sxgbe_core_ops core_ops = {
	.core_init		= sxgbe_core_init,
	.dump_regs		= sxgbe_core_dump_regs,
	.host_irq_status	= sxgbe_core_host_irq_status,
	.pmt			= sxgbe_core_pmt,
	.set_umac_addr		= sxgbe_core_set_umac_addr,
	.get_umac_addr		= sxgbe_core_get_umac_addr,
	.enable_rx		= sxgbe_enable_rx,
	.enable_tx		= sxgbe_enable_tx,
	.get_controller_version	= sxgbe_get_controller_version,
	.get_hw_feature		= sxgbe_get_hw_feature,
	.set_speed		= sxgbe_core_set_speed,
	.set_eee_mode		= sxgbe_set_eee_mode,
	.reset_eee_mode		= sxgbe_reset_eee_mode,
	.set_eee_timer		= sxgbe_set_eee_timer,
	.set_eee_pls		= sxgbe_set_eee_pls,
	.enable_rx_csum		= sxgbe_enable_rx_csum,
	.disable_rx_csum	= sxgbe_disable_rx_csum,
	.enable_rxqueue		= sxgbe_core_enable_rxqueue,
	.disable_rxqueue	= sxgbe_core_disable_rxqueue,
};

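/* The rest of the driver is expected to fetch this ops table once (e.g. at
 * probe/HW-init time, presumably via something like hw->mac =
 * sxgbe_get_core_ops()) and dispatch MAC accesses through it; the exact
 * field name is the caller's choice and is not defined here.
 */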
const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
{
	return &core_ops;
}