// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST			0x104
#define NETSEC_REG_COM_INIT			0x120

#define NETSEC_REG_TOP_STATUS			0x200
#define NETSEC_IRQ_RX				BIT(1)
#define NETSEC_IRQ_TX				BIT(0)

#define NETSEC_REG_TOP_INTEN			0x204
#define NETSEC_REG_INTEN_SET			0x234
#define NETSEC_REG_INTEN_CLR			0x238

#define NETSEC_REG_NRM_TX_STATUS		0x400
#define NETSEC_REG_NRM_TX_INTEN			0x404
#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
#define NRM_TX_ST_NTOWNR	BIT(17)
#define NRM_TX_ST_TR_ERR	BIT(16)
#define NRM_TX_ST_TXDONE	BIT(15)
#define NRM_TX_ST_TMREXP	BIT(14)

#define NETSEC_REG_NRM_RX_STATUS		0x440
#define NETSEC_REG_NRM_RX_INTEN			0x444
#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
#define NRM_RX_ST_RC_ERR	BIT(16)
#define NRM_RX_ST_PKTCNT	BIT(15)
#define NRM_RX_ST_TMREXP	BIT(14)

#define NETSEC_REG_PKT_CMD_BUF			0xd0

#define NETSEC_REG_CLK_EN			0x100

#define NETSEC_REG_PKT_CTRL			0x140

#define NETSEC_REG_DMA_TMR_CTRL			0x20c
#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
#define NETSEC_REG_F_TAIKI_VER			0x230
#define NETSEC_REG_DMA_HM_CTRL			0x214
#define NETSEC_REG_DMA_MH_CTRL			0x220
#define NETSEC_REG_ADDR_DIS_CORE		0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c

#define NETSEC_REG_NRM_TX_PKTCNT		0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418

#define NETSEC_REG_NRM_TX_TMR			0x41c

#define NETSEC_REG_NRM_RX_PKTCNT		0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460

#define NETSEC_REG_NRM_RX_TMR			0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448

#define NETSEC_REG_NRM_TX_CONFIG		0x430
#define NETSEC_REG_NRM_RX_CONFIG		0x470

#define MAC_REG_STATUS				0x1024
#define MAC_REG_DATA				0x11c0
#define MAC_REG_CMD				0x11c4
#define MAC_REG_FLOW_TH				0x11cc
#define MAC_REG_INTF_SEL			0x11d4
#define MAC_REG_DESC_INIT			0x11fc
#define MAC_REG_DESC_SOFT_RST			0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500

#define GMAC_REG_MCR				0x0000
#define GMAC_REG_MFFR				0x0004
#define GMAC_REG_GAR				0x0010
#define GMAC_REG_GDR				0x0014
#define GMAC_REG_FCR				0x0018
#define GMAC_REG_BMR				0x1000
#define GMAC_REG_RDLAR				0x100c
#define GMAC_REG_TDLAR				0x1010
#define GMAC_REG_OMR				0x1018

#define MHZ(n)		((n) * 1000 * 1000)

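/* TX descriptor attr word bit positions, as assembled in
 * netsec_set_tx_de(): OWN(31), LD(30), DRID(24), PT(21), TDRID(16),
 * CC(15), FS(9), LAST(8), CO(7), SO(6), TRS(4). (Summary inferred from
 * this driver's usage, not from a hardware datasheet.)
 */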
#define NETSEC_TX_SHIFT_OWN_FIELD		31
#define NETSEC_TX_SHIFT_LD_FIELD		30
#define NETSEC_TX_SHIFT_DRID_FIELD		24
#define NETSEC_TX_SHIFT_PT_FIELD		21
#define NETSEC_TX_SHIFT_TDRID_FIELD		16
#define NETSEC_TX_SHIFT_CC_FIELD		15
#define NETSEC_TX_SHIFT_FS_FIELD		9
#define NETSEC_TX_LAST				8
#define NETSEC_TX_SHIFT_CO			7
#define NETSEC_TX_SHIFT_SO			6
#define NETSEC_TX_SHIFT_TRS_FIELD		4

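/* RX descriptor attr word bit positions, as consumed by
 * netsec_process_rx(): OWN(31), LD(30), SDRID(24), FR(23), ER(21),
 * ERR(16, 2-bit error code), TDRID(12), FS(9), LS(8), CO(6, 2-bit
 * checksum result). (Summary inferred from this driver's usage.)
 */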
#define NETSEC_RX_PKT_OWN_FIELD			31
#define NETSEC_RX_PKT_LD_FIELD			30
#define NETSEC_RX_PKT_SDRID_FIELD		24
#define NETSEC_RX_PKT_FR_FIELD			23
#define NETSEC_RX_PKT_ER_FIELD			21
#define NETSEC_RX_PKT_ERR_FIELD			16
#define NETSEC_RX_PKT_TDRID_FIELD		12
#define NETSEC_RX_PKT_FS_FIELD			9
#define NETSEC_RX_PKT_LS_FIELD			8
#define NETSEC_RX_PKT_CO_FIELD			6

#define NETSEC_RX_PKT_ERR_MASK			3

#define NETSEC_MAX_TX_PKT_LEN			1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018

#define NETSEC_RING_GMAC			15
#define NETSEC_RING_MAX				2

#define NETSEC_TCP_SEG_LEN_MAX			1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960

#define NETSEC_RX_CKSUM_NOTAVAIL		0
#define NETSEC_RX_CKSUM_OK			1
#define NETSEC_RX_CKSUM_NG			2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)

#define NETSEC_INT_PKTCNT_MAX			2047

#define NETSEC_FLOW_START_TH_MAX		95
#define NETSEC_FLOW_STOP_TH_MAX			95
#define NETSEC_FLOW_PAUSE_TIME_MIN		5

#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)

#define NETSEC_COM_INIT_REG_DB			BIT(2)
#define NETSEC_COM_INIT_REG_CLS			BIT(1)
#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
						 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET		0
#define NETSEC_SOFT_RST_REG_RUN			BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP		1
#define MH_CTRL__MODE_TRANS			BIT(20)

#define NETSEC_GMAC_CMD_ST_READ			0
#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
#define NETSEC_GMAC_BMR_REG_SWR			0x00000001

#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
#define NETSEC_GMAC_OMR_REG_SR			BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
#define NETSEC_MCR_PS				BIT(15)
#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c

#define NETSEC_FCR_RFE				BIT(2)
#define NETSEC_FCR_TFE				BIT(1)

#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
#define NETSEC_GMAC_GAR_REG_GB			BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
#define GMAC_REG_SHIFT_CR_GAR			2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5

#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
#define NETSEC_REG_DESC_TMR_MODE		4
#define NETSEC_REG_DESC_ENDIAN			0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
#define NETSEC_MAC_DESC_INIT_REG_INIT		1

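/* Layout of the EEPROM info block, as read by
 * netsec_netdev_load_microcode(): the MAC address at 0x00, then
 * {address-high, address-low, size} triplets locating the HM and MH
 * microengine images, and an {address, size} pair for the packet
 * engine image (32-bit address only). Sizes are in 32-bit words.
 */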
#define NETSEC_EEPROM_MAC_ADDRESS		0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
#define NETSEC_EEPROM_HM_ME_SIZE		0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
#define NETSEC_EEPROM_PKT_ME_SIZE		0x24

#define DESC_NUM	256

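/* Resulting layout of one page_pool page used as an RX buffer
 * (illustrative, assuming a 4K PAGE_SIZE):
 *
 *   +--------------------------+ page_pool_get_dma_addr(page)
 *   | NETSEC_RXBUF_HEADROOM    |  XDP/skb headroom + NET_IP_ALIGN
 *   +--------------------------+ <- DMA address handed to the NIC
 *   | packet data              |  up to NETSEC_RX_BUF_SIZE bytes
 *   +--------------------------+
 *   | struct skb_shared_info   |  reserved for build_skb()
 *   +--------------------------+
 */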
#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)

#define DESC_SZ	sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)

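/* Per-packet XDP verdicts. netsec_process_rx() ORs them into a running
 * mask for the whole NAPI poll so that netsec_finalize_xdp_rx() flushes
 * redirects and rings the TX doorbell once per poll rather than once
 * per packet.
 */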
#define NETSEC_XDP_PASS          0
#define NETSEC_XDP_CONSUMED      BIT(0)
#define NETSEC_XDP_TX            BIT(1)
#define NETSEC_XDP_REDIR         BIT(2)

enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

enum buf_type {
	TYPE_NETSEC_SKB = 0,
	TYPE_NETSEC_XDP_TX,
	TYPE_NETSEC_XDP_NDO,
};

struct netsec_desc {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
	u8 buf_type;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};

static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC		1000
#define TIMEOUT_SECONDARY_MS_MAC	100

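/* Map the bus clock rate to the MDC clock-range (CR) field of GAR.
 * For example, a 125 MHz priv->freq selects
 * NETSEC_GMAC_GAR_REG_CR_100_150_MHZ.
 */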
static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}

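/* Two-phase poll: busy-spin for up to TIMEOUT_SPINS_MAC iterations
 * first, then back off to sleeping 1-2 ms per iteration for up to
 * TIMEOUT_SECONDARY_MS_MAC iterations before declaring a timeout.
 */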
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	u32 data;
	int ret;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	int status;
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* The Developerbox implements an RTL8211E PHY, which has a
	 * compatibility problem with F_GMAC4: the RTL8211E expects the
	 * MDC clock to keep toggling for several cycles, with MDIO held
	 * high, before entering the IDLE state. To meet this requirement,
	 * the driver issues a dummy read (e.g. of the PHYID1 register at
	 * offset 0x2) right after each write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

	return status;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}

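/* Program interrupt coalescing, clamping out-of-range values.
 * Illustrative usage from userspace (interface name and values are
 * examples only):
 *
 *   ethtool -C eth0 rx-frames 8 rx-usecs 500 tx-frames 8 tx-usecs 500
 *
 * frames map to the *INT_PKTCNT registers and usecs to the *INT_TMR
 * registers; anything below 1 frame / 50 usecs is raised to that floor.
 */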
static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/

static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
}

static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_de *entry;
	int tail = dring->tail;
	unsigned int bytes;
	int cnt = 0;

	spin_lock(&dring->lock);

	bytes = 0;
	entry = dring->vaddr + DESC_SZ * tail;

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
	       cnt < DESC_NUM) {
		struct netsec_desc *desc;
		int eop;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		dma_rmb();

		/* If buf_type is TYPE_NETSEC_SKB or TYPE_NETSEC_XDP_NDO,
		 * the buffer was mapped by us and must be unmapped here
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);

		if (!eop)
			goto next;

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		} else {
			bytes += desc->xdpf->len;
			xdp_return_frame(desc->xdpf);
		}
next:
		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 * again
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
		cnt++;
	}

	spin_unlock(&dring->lock);

	if (!cnt)
		return false;

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);

	return true;
}

static void netsec_process_tx(struct netsec_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	bool cleaned;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		smp_wmb();
		netif_wake_queue(ndev);
	}
}

static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct page *page;

	page = page_pool_dev_alloc_pages(dring->page_pool);
	if (!page)
		return NULL;

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = NETSEC_RX_BUF_SIZE;

	return page_address(page);
}

static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	u16 idx = from;

	while (num) {
		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
		idx++;
		if (idx >= DESC_NUM)
			idx = 0;
		num--;
	}
}

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
{
	if (likely(pkts))
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
}

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
				   u16 pkts)
{
	if (xdp_res & NETSEC_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
}

static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
{
	int idx = dring->head;
	struct netsec_de *de;
	u32 attr;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
}

/* The current driver only supports 1 Txq; this must run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;
	u16 filled;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
	else
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;

	if (is_ndo) {
		/* this is for ndo_xdp_xmit, the buffer needs mapping before
		 * sending
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
	} else {
		/* This is the device Rx buffer from page_pool. No need to
		 * remap; just sync it and send
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
			sizeof(*xdpf);
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
					   dma_dir);
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
	}

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netdev_sent_queue(priv->ndev, xdpf->len);
	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;
}

static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
	u32 ret;

	if (unlikely(!xdpf))
		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);

	return ret;
}

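/* Run the XDP program on one received buffer and translate the bpf
 * verdict into a NETSEC_XDP_* flag. On every outcome other than a
 * successful pass, TX or redirect, the page goes straight back to the
 * page_pool, with only the packet length synced for device reuse.
 */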
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	unsigned int len = xdp->data_end - xdp->data;
	u32 ret = NETSEC_XDP_PASS;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		ret = NETSEC_XDP_PASS;
		break;
	case XDP_TX:
		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX)
			page_pool_put_page(dring->page_pool,
					   virt_to_head_page(xdp->data), len,
					   true);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(priv->ndev, xdp, prog);
		if (!err) {
			ret = NETSEC_XDP_REDIR;
		} else {
			ret = NETSEC_XDP_CONSUMED;
			page_pool_put_page(dring->page_pool,
					   virt_to_head_page(xdp->data), len,
					   true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(priv->ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		ret = NETSEC_XDP_CONSUMED;
		page_pool_put_page(dring->page_pool,
				   virt_to_head_page(xdp->data), len, true);
		break;
	}

	return ret;
}

static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	u16 xdp_xmit = 0;
	u32 xdp_act = 0;
	int done = 0;

	rcu_read_lock();
	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		struct sk_buff *skb = NULL;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;
		struct xdp_buff xdp;
		void *buf_addr;

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
			break;
		}

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */
		dma_rmb();
		done++;

		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
				  rx_info.err_code);
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);
			continue;
		}
		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

		/* allocate a fresh buffer and map it to the hardware.
		 * This will eventually replace the old buffer in the hardware
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

		if (unlikely(!buf_addr))
			break;

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
					dma_dir);
		prefetch(desc->addr);

		xdp.data_hard_start = desc->addr;
		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + pkt_len;
		xdp.rxq = &dring->xdp_rxq;

		if (xdp_prog) {
			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
			if (xdp_result != NETSEC_XDP_PASS) {
				xdp_act |= xdp_result;
				if (xdp_result == NETSEC_XDP_TX)
					xdp_xmit++;
				goto next;
			}
		}
		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If skb fails recycle_direct will either unmap and
			 * free the page or refill the cache depending on the
			 * cache state. Since we paid the allocation cost if
			 * building an skb fails try to put the page into cache
			 */
			page_pool_put_page(dring->page_pool, page, pkt_len,
					   true);
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");
			break;
		}
		page_pool_release_page(dring->page_pool, page);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

next:
		if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
		    xdp_result) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
		}

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;
	}
	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);

	rcu_read_unlock();

	return done;
}

static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
	struct netsec_priv *priv;
	int done;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
	}

	return done;
}

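/* Number of in-flight descriptors in the circular ring. Worked
 * example: with DESC_NUM = 256, head = 3 and tail = 250 the ring has
 * wrapped, so used = 3 + 256 - 250 = 9.
 */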
static int netsec_desc_used(struct netsec_desc_ring *dring)
{
	int used;

	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
	else
		used = dring->head + DESC_NUM - dring->tail;

	return used;
}

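/* Stop the queue when fewer than two descriptors remain, then
 * re-check: the smp_rmb() below pairs with the smp_wmb() in
 * netsec_process_tx() so a concurrent TX completion is observed
 * before we commit to returning NETDEV_TX_BUSY.
 */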
static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed
		 */
		smp_rmb();

		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);
	}

	return 0;
}

static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;
	int filled;

	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			tcp_v6_gso_csum_prep(skb);
		}

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
	}

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
}

static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;
	u16 idx;

	if (!dring->vaddr || !dring->desc)
		return;
	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_full_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(desc->skb);
		}
	}

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);
	}

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
}

static void netsec_free_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;
}

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr)
		goto err;

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc)
		goto err;

	return 0;
err:
	netsec_free_dring(priv, id);

	return -ENOMEM;
}

static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	int i;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
	}
}

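/* Build the RX ring on top of a page_pool sized to the ring. The pool
 * maps pages itself (PP_FLAG_DMA_MAP) and syncs at most
 * NETSEC_RX_BUF_SIZE bytes past the NETSEC_RXBUF_HEADROOM offset for
 * the device (PP_FLAG_DMA_SYNC_DEV). Mapping is bidirectional when an
 * XDP program is attached, since XDP_TX transmits out of RX pages.
 */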
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = DESC_NUM,
		.nid = NUMA_NO_NODE,
		.dev = priv->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = NETSEC_RXBUF_HEADROOM,
		.max_len = NETSEC_RX_BUF_SIZE,
	};
	int i, err;

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;
		goto err_out;
	}

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
	if (err)
		goto err_out;

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 dring->page_pool);
	if (err)
		goto err_out;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;
		void *buf;
		u16 len;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

		if (!buf) {
			err = -ENOMEM;
			goto err_out;
		}
		desc->dma_addr = dma_handle;
		desc->addr = buf;
		desc->len = len;
	}

	netsec_rx_fill(priv, 0, DESC_NUM);

	return 0;

err_out:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
	return err;
}

static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
{
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;
	u32 i;

	ucode = ioremap(base, size * sizeof(u32));
	if (!ucode)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

	iounmap(ucode);
	return 0;
}

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
	u32 addr_h, addr_l, size;
	int err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = 0;
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	return 0;
}

static int netsec_reset_hardware(struct netsec_priv *priv,
				 bool load_ucode)
{
	u32 value;
	int err;

	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();
	}

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
		cpu_relax();

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	if (load_ucode) {
		err = netsec_netdev_load_microcode(priv);
		if (err) {
			netif_err(priv, probe, priv->ndev,
				  "%s: failed to load microcode (%d)\n",
				  __func__, err);
			return err;
		}
	}

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
		return -ENXIO;
	}
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
		cpu_relax();

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);

	return 0;
}

static int netsec_start_gmac(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;
	int ret;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))
		return -ETIMEDOUT;

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (ret)
		return ret;
	if (value & NETSEC_GMAC_BMR_REG_SWR)
		return -EAGAIN;

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
		return -ETIMEDOUT;

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
		return -ETIMEDOUT;

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
		return -ETIMEDOUT;

	ret = netsec_mac_update_to_phy_state(priv);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_stop_gmac(struct netsec_priv *priv)
{
	u32 value;
	int ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);
}

static void netsec_phy_adjust_link(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
	else
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
}

static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
{
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	}
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
	}

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int netsec_netdev_open(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);
		goto err1;
	}

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		goto err2;
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			goto err3;
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);
			goto err3;
		}
	}

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	return 0;
err3:
	free_irq(priv->ndev->irq, priv);
err2:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
err1:
	pm_runtime_put_sync(priv->dev);
	return ret;
}

static int netsec_netdev_stop(struct net_device *ndev)
{
	int ret;
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);
	dma_wmb();

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

	return ret;
}

static int netsec_netdev_init(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;
	u16 data;

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
	if (ret)
		goto err1;

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
		BMCR_PDOWN;
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	ret = netsec_reset_hardware(priv, true);
	if (ret)
		goto err2;

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	return 0;
err2:
	netsec_free_dring(priv, NETSEC_RING_RX);
err1:
	netsec_free_dring(priv, NETSEC_RING_TX);
	return ret;
}

static void netsec_netdev_uninit(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
}

static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

	return 0;
}

static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	int drops = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		} else {
			tx_ring->xdp_xmit++;
		}
	}
	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;
	}

	return n - drops;
}

static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = priv->ndev;
	struct bpf_prog *old_prog;

	/* For now, support only standard MTU-sized frames with XDP */
1781 	if (prog && dev->mtu > 1500) {
1782 		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
1783 		return -EOPNOTSUPP;
1784 	}
1785 
1786 	if (netif_running(dev))
1787 		netsec_netdev_stop(dev);
1788 
1789 	/* Detach old prog, if any */
1790 	old_prog = xchg(&priv->xdp_prog, prog);
1791 	if (old_prog)
1792 		bpf_prog_put(old_prog);
1793 
1794 	if (netif_running(dev))
1795 		netsec_netdev_open(dev);
1796 
1797 	return 0;
1798 }
1799 
1800 static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
1801 {
1802 	struct netsec_priv *priv = netdev_priv(ndev);
1803 
1804 	switch (xdp->command) {
1805 	case XDP_SETUP_PROG:
1806 		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
1807 	case XDP_QUERY_PROG:
1808 		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1809 		return 0;
1810 	default:
1811 		return -EINVAL;
1812 	}
1813 }
1814 
1815 static const struct net_device_ops netsec_netdev_ops = {
1816 	.ndo_init		= netsec_netdev_init,
1817 	.ndo_uninit		= netsec_netdev_uninit,
1818 	.ndo_open		= netsec_netdev_open,
1819 	.ndo_stop		= netsec_netdev_stop,
1820 	.ndo_start_xmit		= netsec_netdev_start_xmit,
1821 	.ndo_set_features	= netsec_netdev_set_features,
1822 	.ndo_set_mac_address    = eth_mac_addr,
1823 	.ndo_validate_addr	= eth_validate_addr,
1824 	.ndo_do_ioctl		= phy_do_ioctl,
1825 	.ndo_xdp_xmit		= netsec_xdp_xmit,
1826 	.ndo_bpf		= netsec_xdp,
1827 };
1828 
1829 static int netsec_of_probe(struct platform_device *pdev,
1830 			   struct netsec_priv *priv, u32 *phy_addr)
1831 {
1832 	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1833 	if (!priv->phy_np) {
1834 		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
1835 		return -EINVAL;
1836 	}
1837 
1838 	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
1839 
1840 	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
1841 	if (IS_ERR(priv->clk)) {
1842 		dev_err(&pdev->dev, "phy_ref_clk not found\n");
1843 		return PTR_ERR(priv->clk);
1844 	}
1845 	priv->freq = clk_get_rate(priv->clk);
1846 
1847 	return 0;
1848 }
1849 
static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
{
	int ret;

	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret) {
		dev_err(&pdev->dev,
			"missing required property 'phy-channel'\n");
		return ret;
	}

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		dev_err(&pdev->dev,
			"missing required property 'socionext,phy-clock-frequency'\n");
	return ret;
}

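/* On ACPI systems the PHY device was created by hand in
 * netsec_register_mdio(), so it must be removed and freed by hand as
 * well before the bus is unregistered.
 */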
static void netsec_unregister_mdio(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);
	}

	mdiobus_unregister(priv->mii_bus);
}

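/* Register the MDIO bus. With device-tree the bus contents come from the
 * "mdio" subnode; otherwise auto-probing is masked off and the single
 * PHY at @phy_addr is registered manually.
 */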
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
		if (mdio_node) {
			parent = mdio_node;
		} else {
			/* Older firmware doesn't populate the "mdio"
			 * subnode; tolerate that until firmware can be
			 * upgraded.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		}

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);

		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}
	} else {
		/* Mask out all PHYs from auto probing. */
		bus->phy_mask = ~0;
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			mdiobus_unregister(bus);
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			mdiobus_unregister(bus);
			dev_err(priv->dev,
				"phy_device_register err(%d)\n", ret);
		}
	}

	return ret;
}

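/* Bind the device: map the MMIO and EEPROM regions, determine the MAC
 * address, register the MDIO bus and finally the net_device itself.
 */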
static int netsec_probe(struct platform_device *pdev)
{
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;
	int ret;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmio_res) {
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	}

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!eeprom_res) {
		dev_err(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		dev_err(&pdev->dev, "No IRQ resource found.\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->phy_interface = device_get_phy_mode(&pdev->dev);
	if ((int)priv->phy_interface < 0) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
	if (mac)
		ether_addr_copy(ndev->dev_addr, mac);

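	/* Fall back to the EEPROM if firmware did not supply a valid
	 * address; it is stored there byte-swapped within two 32-bit
	 * words, hence the reordering below.
	 */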
	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;

		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
	if (ret)
		goto free_ndev;

	priv->phy_addr = phy_addr;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	/* Interrupt coalescing defaults, tuned for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;

	/* Hold a runtime PM reference across probe only; open/close take
	 * their own references.
	 */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* This driver supports only the F_TAIKI variant of the NETSEC IP */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
		ret = -ENODEV;
		goto pm_disable;
	}

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);
	if (ret)
		goto unreg_napi;

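	/* The DMA engine can address up to 40 bits */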
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
	if (ret) {
		netif_err(priv, probe, ndev, "register_netdev() failed\n");
		goto unreg_mii;
	}

	pm_runtime_put_sync(&pdev->dev);
	return 0;

unreg_mii:
	netsec_unregister_mdio(priv);
unreg_napi:
	netif_napi_del(&priv->napi);
pm_disable:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ndev:
	free_netdev(ndev);
	dev_err(&pdev->dev, "init failed\n");

	return ret;
}

static int netsec_remove(struct platform_device *pdev)
{
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM
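/* Runtime PM: gate the NETSEC core clock domains and stop the external
 * PHY reference clock while the device is idle.
 */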
static int netsec_runtime_suspend(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int netsec_runtime_resume(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					       NETSEC_CLK_EN_REG_DOM_C |
					       NETSEC_CLK_EN_REG_DOM_G);
	return 0;
}
#endif

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
	{ }
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

#ifdef CONFIG_ACPI
static const struct acpi_device_id netsec_acpi_ids[] = {
	{ "SCX0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
#endif

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
	.driver = {
		.name = "netsec",
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
	},
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");