// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/pcs/pcs-mtk-lynxi.h>
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>

#include "mtk_eth_soc.h"
#include "mtk_wed.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
				  sizeof(u64) }

static const struct mtk_reg_map mtk_reg_map = {
	.tx_irq_mask = 0x1a1c,
	.tx_irq_status = 0x1a18,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.adma_rx_dbg0 = 0x0a38,
		.int_grp = 0x0a50,
	},
	.qdma = {
		.qtx_cfg = 0x1800,
		.qtx_sch = 0x1804,
		.rx_ptr = 0x1900,
		.rx_cnt_cfg = 0x1904,
		.qcrx_ptr = 0x1908,
		.glo_cfg = 0x1a04,
		.rst_idx = 0x1a08,
		.delay_irq = 0x1a0c,
		.fc_th = 0x1a10,
		.tx_sch_rate = 0x1a14,
		.int_grp = 0x1a20,
		.hred = 0x1a44,
		.ctx_ptr = 0x1b00,
		.dtx_ptr = 0x1b04,
		.crx_ptr = 0x1b10,
		.drx_ptr = 0x1b14,
		.fq_head = 0x1b20,
		.fq_tail = 0x1b24,
		.fq_count = 0x1b28,
		.fq_blen = 0x1b2c,
	},
	.gdm1_cnt = 0x2400,
	.gdma_to_ppe = 0x4444,
	.ppe_base = 0x0c00,
	.wdma_base = {
		[0] = 0x2800,
		[1] = 0x2c00,
	},
	.pse_iq_sta = 0x0110,
	.pse_oq_sta = 0x0118,
};

static const struct mtk_reg_map mt7628_reg_map = {
	.tx_irq_mask = 0x0a28,
	.tx_irq_status = 0x0a20,
	.pdma = {
		.rx_ptr = 0x0900,
		.rx_cnt_cfg = 0x0904,
		.pcrx_ptr = 0x0908,
		.glo_cfg = 0x0a04,
		.rst_idx = 0x0a08,
		.delay_irq = 0x0a0c,
		.irq_status = 0x0a20,
		.irq_mask = 0x0a28,
		.int_grp = 0x0a50,
	},
};

static const struct mtk_reg_map mt7986_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6100,
		.rx_cnt_cfg = 0x6104,
		.pcrx_ptr = 0x6108,
		.glo_cfg = 0x6204,
		.rst_idx = 0x6208,
		.delay_irq = 0x620c,
		.irq_status = 0x6220,
		.irq_mask = 0x6228,
		.adma_rx_dbg0 = 0x6238,
		.int_grp = 0x6250,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.hred = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe = 0x3333,
	.ppe_base = 0x2000,
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
	.pse_iq_sta = 0x0180,
	.pse_oq_sta = 0x01a0,
};

static const struct mtk_reg_map mt7988_reg_map = {
	.tx_irq_mask = 0x461c,
	.tx_irq_status = 0x4618,
	.pdma = {
		.rx_ptr = 0x6900,
		.rx_cnt_cfg = 0x6904,
		.pcrx_ptr = 0x6908,
		.glo_cfg = 0x6a04,
		.rst_idx = 0x6a08,
		.delay_irq = 0x6a0c,
		.irq_status = 0x6a20,
		.irq_mask = 0x6a28,
		.adma_rx_dbg0 = 0x6a38,
		.int_grp = 0x6a50,
	},
	.qdma = {
		.qtx_cfg = 0x4400,
		.qtx_sch = 0x4404,
		.rx_ptr = 0x4500,
		.rx_cnt_cfg = 0x4504,
		.qcrx_ptr = 0x4508,
		.glo_cfg = 0x4604,
		.rst_idx = 0x4608,
		.delay_irq = 0x460c,
		.fc_th = 0x4610,
		.int_grp = 0x4620,
		.hred = 0x4644,
		.ctx_ptr = 0x4700,
		.dtx_ptr = 0x4704,
		.crx_ptr = 0x4710,
		.drx_ptr = 0x4714,
		.fq_head = 0x4720,
		.fq_tail = 0x4724,
		.fq_count = 0x4728,
		.fq_blen = 0x472c,
		.tx_sch_rate = 0x4798,
	},
	.gdm1_cnt = 0x1c00,
	.gdma_to_ppe = 0x3333,
	.ppe_base = 0x2000,
	.wdma_base = {
		[0] = 0x4800,
		[1] = 0x4c00,
	},
	.pse_iq_sta = 0x0180,
	.pse_oq_sta = 0x01a0,
};

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};

static const char * const mtk_clks_source_name[] = {
	"ethif",
	"sgmiitop",
	"esw",
	"gp0",
	"gp1",
	"gp2",
	"gp3",
	"xgp1",
	"xgp2",
	"xgp3",
	"crypto",
	"fe",
	"trgpll",
	"sgmii_tx250m",
	"sgmii_rx250m",
	"sgmii_cdr_ref",
	"sgmii_cdr_fb",
	"sgmii2_tx250m",
	"sgmii2_rx250m",
	"sgmii2_cdr_ref",
	"sgmii2_cdr_fb",
	"sgmii_ck",
	"eth2pll",
	"wocpu0",
	"wocpu1",
	"netsys0",
	"netsys1",
	"ethwarp_wocpu2",
	"ethwarp_wocpu1",
	"ethwarp_wocpu0",
	"top_usxgmii0_sel",
	"top_usxgmii1_sel",
	"top_sgm0_sel",
	"top_sgm1_sel",
	"top_xfi_phy0_xtal_sel",
	"top_xfi_phy1_xtal_sel",
	"top_eth_gmii_sel",
	"top_eth_refck_50m_sel",
	"top_eth_sys_200m_sel",
	"top_eth_sys_sel",
	"top_eth_xgmii_sel",
	"top_eth_mii_sel",
	"top_netsys_sel",
	"top_netsys_500m_sel",
	"top_netsys_pao_2x_sel",
	"top_netsys_sync_250m_sel",
	"top_netsys_ppefb_250m_sel",
	"top_netsys_warp_sel",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

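/* Read-modify-write helper: clear the bits in @mask, then OR in @set, on the
 * register at offset @reg. Note that it returns the register offset rather
 * than the resulting value; callers that need the new value read it back
 * with mtk_r32().
 *
 * Illustrative use (mirrors mtk_setup_bridge_switch() below):
 *	mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
 *		MTK_XGMAC_STS(MTK_GMAC1_ID));
 */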
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return reg;
}

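/* Poll the indirect access control register until the previous MDIO
 * transaction has completed, rescheduling between polls and giving up
 * after PHY_IAC_TIMEOUT.
 */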
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		cond_resched();
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -ETIMEDOUT;
}

static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
			       u32 write_data)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_WRITE |
		PHY_IAC_REG(phy_reg) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(write_data),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return 0;
}

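/* Clause 45 accesses take two bus transactions on the same IAC register: an
 * ADDR cycle that latches the register number within the given MMD (devad),
 * followed by the actual WRITE (or READ) cycle that carries the data.
 */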
static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
			       u32 devad, u32 phy_reg, u32 write_data)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_C45_ADDR |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(phy_reg),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_WRITE |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(write_data),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return 0;
}

static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_C22_READ |
		PHY_IAC_REG(phy_reg) |
		PHY_IAC_ADDR(phy_addr),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}

static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
			      u32 devad, u32 phy_reg)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_C45_ADDR |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(phy_reg),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C45 |
		PHY_IAC_CMD_C45_READ |
		PHY_IAC_REG(devad) |
		PHY_IAC_ADDR(phy_addr),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}

static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
			      int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
			      int devad, int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
}

static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
}

static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
			     int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
}

static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface)
{
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
}

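/* Used on NETSYS v3+ SoCs when a MAC runs in PHY_INTERFACE_MODE_INTERNAL:
 * the GMAC1 XGMAC link towards the built-in switch has no link partner to
 * negotiate with, so it is forced up here and the GSW bridge inter-packet
 * gap is set to 11.
 */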
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
	/* Force Port1 XGMAC Link Up */
	mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
		MTK_XGMAC_STS(MTK_GMAC1_ID));

	/* Adjust GSW bridge IPG to 11 */
	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
		MTK_GSW_CFG);
}

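/* phylink .mac_select_pcs callback: hand back the LynxI SGMII PCS for
 * SGMII/1000base-X/2500base-X links, using PCS instance 0 on SoCs where the
 * SGMII unit is shared and the instance matching the MAC id otherwise.
 * Other interface modes are handled by the MAC itself (NULL PCS).
 */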
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface)) {
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		return eth->sgmii_pcs[sid];
	}

	return NULL;
}

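/* phylink .mac_config callback. On an interface mode change this muxes the
 * SoC-level GMAC paths (RGMII/SGMII/GEPHY), sets up the TRGMII PLL and pad
 * timing for GMAC0 where needed, and programs the GE mode in ETHSYS_SYSCFG0.
 * For SGMII/802.3z the SYSCFG0 value is only saved here; it is applied later
 * in mtk_mac_finish() once the PCS has been configured.
 */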
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	int val, ge_mode, err = 0;
	u32 i;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
			if (err)
				goto init_err;
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_INTERNAL:
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface);

				/* mt7623_pad_clk_setup */
				for (i = 0; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		default:
			ge_mode = 0;
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Save the syscfg0 value for mac_finish */
		mac->syscfg0 = val;
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	if (mtk_is_netsys_v3_or_greater(eth) &&
	    mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));

		mtk_setup_bridge_switch(eth);
	}

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
			  phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new;

	/* Enable SGMII */
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface))
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, mac->syscfg0);

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return 0;
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

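/* Program the per-queue QDMA TX scheduler/shaper for the given link speed.
 * The rate fields appear to encode MAN * 10^EXP in kbit/s: the minimum rate
 * below (MAN=1, EXP=4) matches the "10 Mbps" comment, and the per-speed
 * maximums then work out to roughly line rate. MT7621 uses slightly
 * different constants from the other SoCs.
 */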
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
				int speed)
{
	const struct mtk_soc_data *soc = eth->soc;
	u32 ofs, val;

	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
		return;

	val = MTK_QTX_SCH_MIN_RATE_EN |
	      /* minimum: 10 Mbps */
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
	if (mtk_is_netsys_v1(eth))
		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	} else {
		switch (speed) {
		case SPEED_10:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_100:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
			break;
		case SPEED_1000:
			val |= MTK_QTX_SCH_MAX_RATE_EN |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
			break;
		default:
			break;
		}
	}

	ofs = MTK_QTX_OFFSET * idx;
	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}

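/* phylink .mac_link_up callback: translate the resolved speed, duplex and
 * pause settings into MAC_MCR, then force the link up and enable TX/RX.
 */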
static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr;

	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		 MAC_MCR_FORCE_RX_FC);

	/* Configure speed */
	mac->speed = speed;
	switch (speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	/* Configure duplex */
	if (duplex == DUPLEX_FULL)
		mcr |= MAC_MCR_FORCE_DPX;

	/* Configure pause modes - phylink will avoid these for half duplex */
	if (tx_pause)
		mcr |= MAC_MCR_FORCE_TX_FC;
	if (rx_pause)
		mcr |= MAC_MCR_FORCE_RX_FC;

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.mac_select_pcs = mtk_mac_select_pcs,
	.mac_config = mtk_mac_config,
	.mac_finish = mtk_mac_finish,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

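/* Register the MDIO bus described by the "mdio-bus" child node. The MDC
 * frequency defaults to 2.5 MHz and can be lowered through the optional
 * "clock-frequency" property; the divider is derived from MDC_MAX_FREQ.
 *
 * Illustrative (hypothetical) device tree fragment, assuming the requested
 * frequency lies within the supported divider range:
 *	mdio-bus {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		clock-frequency = <1000000>;
 *	};
 */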
static int mtk_mdio_init(struct mtk_eth *eth)
{
	unsigned int max_clk = 2500000, divider;
	struct device_node *mii_np;
	int ret;
	u32 val;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read_c22;
	eth->mii_bus->write = mtk_mdio_write_c22;
	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);

	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			dev_err(eth->dev, "MDIO clock frequency out of range");
			ret = -EINVAL;
			goto err_put_node;
		}
		max_clk = val;
	}
	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);

	/* Configure MDC Turbo Mode */
	if (mtk_is_netsys_v3_or_greater(eth))
		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);

	/* Configure MDC Divider */
	val = FIELD_PREP(PPSC_MDC_CFG, divider);
	if (!mtk_is_netsys_v3_or_greater(eth))
		val |= PPSC_MDC_TURBO;
	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);

	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);

	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

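/* The interrupt mask registers are updated with a read-modify-write
 * sequence, so these helpers serialize concurrent callers through the
 * tx_irq_lock and rx_irq_lock spinlocks with interrupts disabled.
 */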
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

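/* Fold the hardware MIB counters for this MAC into the software counters in
 * mac->hw_stats. MT7628 exposes a reduced SDM counter set; other SoCs read
 * the GDM counter block, whose per-counter offsets moved on NETSYS v3.
 */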
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	struct mtk_eth *eth = mac->hw;

	u64_stats_update_begin(&hw_stats->syncp);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
	} else {
		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
		unsigned int offs = hw_stats->reg_offset;
		u64 stats;

		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
		hw_stats->rx_overflow +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
		hw_stats->rx_fcs_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
		hw_stats->rx_short_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
		hw_stats->rx_long_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);

		if (mtk_is_netsys_v3_or_greater(eth)) {
			hw_stats->tx_skip +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
			hw_stats->tx_collisions +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
			hw_stats->tx_bytes +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
			if (stats)
				hw_stats->tx_bytes += (stats << 32);
			hw_stats->tx_packets +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
		} else {
			hw_stats->tx_skip +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
			hw_stats->tx_collisions +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
			hw_stats->tx_bytes +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
			if (stats)
				hw_stats->tx_bytes += (stats << 32);
			hw_stats->tx_packets +=
				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
		}
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);

	return buf_size;
}

1099160d3a9bSLorenzo Bianconi static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1100160d3a9bSLorenzo Bianconi struct mtk_rx_dma_v2 *dma_rxd)
1101656e7052SJohn Crispin {
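	/* rxd2 carries the DMA done bit; read it first and bail out while
	 * the descriptor is still owned by the hardware.
	 */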
1102656e7052SJohn Crispin rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1103816ac3e6SFelix Fietkau if (!(rxd->rxd2 & RX_DMA_DONE))
1104816ac3e6SFelix Fietkau return false;
1105816ac3e6SFelix Fietkau
1106816ac3e6SFelix Fietkau rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1107656e7052SJohn Crispin rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1108656e7052SJohn Crispin rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1109a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
1110160d3a9bSLorenzo Bianconi rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1111160d3a9bSLorenzo Bianconi rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1112160d3a9bSLorenzo Bianconi }
1113816ac3e6SFelix Fietkau
1114816ac3e6SFelix Fietkau return true;
1115656e7052SJohn Crispin }
1116656e7052SJohn Crispin
11172f2c0d29SChen Lin static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
11182f2c0d29SChen Lin {
11192f2c0d29SChen Lin unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
11202f2c0d29SChen Lin unsigned long data;
11212f2c0d29SChen Lin
11222f2c0d29SChen Lin data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
11232f2c0d29SChen Lin get_order(size));
11242f2c0d29SChen Lin
11252f2c0d29SChen Lin return (void *)data;
11262f2c0d29SChen Lin }
11272f2c0d29SChen Lin
1128656e7052SJohn Crispin /* the qdma core needs scratch memory to be set up */
1129656e7052SJohn Crispin static int mtk_init_fq_dma(struct mtk_eth *eth)
1130656e7052SJohn Crispin {
1131eb067347SLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
1132605e4fe4SJohn Crispin dma_addr_t phy_ring_tail;
1133c30e0b9bSFelix Fietkau int cnt = MTK_QDMA_RING_SIZE;
1134656e7052SJohn Crispin dma_addr_t dma_addr;
1135656e7052SJohn Crispin int i;
1136656e7052SJohn Crispin
1137ebb1e4f9SDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1138ebb1e4f9SDaniel Golle eth->scratch_ring = eth->sram_base;
1139ebb1e4f9SDaniel Golle else
1140d776a57eSFelix Fietkau eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1141eb067347SLorenzo Bianconi cnt * soc->txrx.txd_size,
1142605e4fe4SJohn Crispin ð->phy_scratch_ring,
114362dfb4ccSLorenzo Bianconi GFP_KERNEL);
1144656e7052SJohn Crispin if (unlikely(!eth->scratch_ring))
1145656e7052SJohn Crispin return -ENOMEM;
1146656e7052SJohn Crispin
1147eb067347SLorenzo Bianconi eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1148562c5a70SJohn Crispin if (unlikely(!eth->scratch_head))
1149562c5a70SJohn Crispin return -ENOMEM;
1150562c5a70SJohn Crispin
1151d776a57eSFelix Fietkau dma_addr = dma_map_single(eth->dma_dev,
1152656e7052SJohn Crispin eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1153656e7052SJohn Crispin DMA_FROM_DEVICE);
1154d776a57eSFelix Fietkau if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1155656e7052SJohn Crispin return -ENOMEM;
1156656e7052SJohn Crispin
1157eb067347SLorenzo Bianconi phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1158656e7052SJohn Crispin
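	/* Chain the scratch descriptors: txd1 points at the descriptor's
	 * page-sized buffer, txd2 at the DMA address of the next
	 * descriptor in the ring.
	 */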
1159656e7052SJohn Crispin for (i = 0; i < cnt; i++) {
1160160d3a9bSLorenzo Bianconi struct mtk_tx_dma_v2 *txd;
1161eb067347SLorenzo Bianconi
11624d642690SLorenzo Bianconi txd = eth->scratch_ring + i * soc->txrx.txd_size;
1163eb067347SLorenzo Bianconi txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1164656e7052SJohn Crispin if (i < cnt - 1)
1165eb067347SLorenzo Bianconi txd->txd2 = eth->phy_scratch_ring +
1166eb067347SLorenzo Bianconi (i + 1) * soc->txrx.txd_size;
1167eb067347SLorenzo Bianconi
1168eb067347SLorenzo Bianconi txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1169eb067347SLorenzo Bianconi txd->txd4 = 0;
1170a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
1171160d3a9bSLorenzo Bianconi txd->txd5 = 0;
1172160d3a9bSLorenzo Bianconi txd->txd6 = 0;
1173160d3a9bSLorenzo Bianconi txd->txd7 = 0;
1174160d3a9bSLorenzo Bianconi txd->txd8 = 0;
1175160d3a9bSLorenzo Bianconi }
1176656e7052SJohn Crispin }
1177656e7052SJohn Crispin
11788cb42714SLorenzo Bianconi mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
11798cb42714SLorenzo Bianconi mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
11808cb42714SLorenzo Bianconi mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
11818cb42714SLorenzo Bianconi mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1182656e7052SJohn Crispin
1183656e7052SJohn Crispin return 0;
1184656e7052SJohn Crispin }
1185656e7052SJohn Crispin
11867173eca8SLorenzo Bianconi static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1187656e7052SJohn Crispin {
11887173eca8SLorenzo Bianconi return ring->dma + (desc - ring->phys);
1189656e7052SJohn Crispin }
1190656e7052SJohn Crispin
1191c4fd06c2SLorenzo Bianconi static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
11927173eca8SLorenzo Bianconi void *txd, u32 txd_size)
1193656e7052SJohn Crispin {
11947173eca8SLorenzo Bianconi int idx = (txd - ring->dma) / txd_size;
1195656e7052SJohn Crispin
1196656e7052SJohn Crispin return &ring->buf[idx];
1197656e7052SJohn Crispin }
1198656e7052SJohn Crispin
1199296c9120SStefan Roese static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1200296c9120SStefan Roese struct mtk_tx_dma *dma)
1201296c9120SStefan Roese {
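	/* The PDMA ring shadows the QDMA ring entry for entry, so the
	 * matching descriptor is found by index arithmetic.
	 */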
12027173eca8SLorenzo Bianconi return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1203296c9120SStefan Roese }
1204296c9120SStefan Roese
12057173eca8SLorenzo Bianconi static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1206296c9120SStefan Roese {
12077173eca8SLorenzo Bianconi return (dma - ring->dma) / txd_size;
1208296c9120SStefan Roese }
1209296c9120SStefan Roese
1210c30c4a82SFelix Fietkau static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1211853246dbSLorenzo Bianconi struct xdp_frame_bulk *bq, bool napi)
1212656e7052SJohn Crispin {
1213296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1214656e7052SJohn Crispin if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1215d776a57eSFelix Fietkau dma_unmap_single(eth->dma_dev,
1216656e7052SJohn Crispin dma_unmap_addr(tx_buf, dma_addr0),
1217656e7052SJohn Crispin dma_unmap_len(tx_buf, dma_len0),
1218656e7052SJohn Crispin DMA_TO_DEVICE);
1219656e7052SJohn Crispin } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1220d776a57eSFelix Fietkau dma_unmap_page(eth->dma_dev,
1221656e7052SJohn Crispin dma_unmap_addr(tx_buf, dma_addr0),
1222656e7052SJohn Crispin dma_unmap_len(tx_buf, dma_len0),
1223656e7052SJohn Crispin DMA_TO_DEVICE);
1224656e7052SJohn Crispin }
1225296c9120SStefan Roese } else {
1226296c9120SStefan Roese if (dma_unmap_len(tx_buf, dma_len0)) {
1227d776a57eSFelix Fietkau dma_unmap_page(eth->dma_dev,
1228296c9120SStefan Roese dma_unmap_addr(tx_buf, dma_addr0),
1229296c9120SStefan Roese dma_unmap_len(tx_buf, dma_len0),
1230296c9120SStefan Roese DMA_TO_DEVICE);
1231296c9120SStefan Roese }
1232296c9120SStefan Roese
1233296c9120SStefan Roese if (dma_unmap_len(tx_buf, dma_len1)) {
1234d776a57eSFelix Fietkau dma_unmap_page(eth->dma_dev,
1235296c9120SStefan Roese dma_unmap_addr(tx_buf, dma_addr1),
1236296c9120SStefan Roese dma_unmap_len(tx_buf, dma_len1),
1237296c9120SStefan Roese DMA_TO_DEVICE);
1238296c9120SStefan Roese }
1239296c9120SStefan Roese }
1240296c9120SStefan Roese
1241155738a4SLorenzo Bianconi if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
12425886d26fSLorenzo Bianconi if (tx_buf->type == MTK_TYPE_SKB) {
12435886d26fSLorenzo Bianconi struct sk_buff *skb = tx_buf->data;
12445886d26fSLorenzo Bianconi
1245c30c4a82SFelix Fietkau if (napi)
12465886d26fSLorenzo Bianconi napi_consume_skb(skb, napi);
1247c30c4a82SFelix Fietkau else
12485886d26fSLorenzo Bianconi dev_kfree_skb_any(skb);
1249155738a4SLorenzo Bianconi } else {
12505886d26fSLorenzo Bianconi struct xdp_frame *xdpf = tx_buf->data;
12515886d26fSLorenzo Bianconi
12525886d26fSLorenzo Bianconi if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
12535886d26fSLorenzo Bianconi xdp_return_frame_rx_napi(xdpf);
1254853246dbSLorenzo Bianconi else if (bq)
1255853246dbSLorenzo Bianconi xdp_return_frame_bulk(xdpf, bq);
12565886d26fSLorenzo Bianconi else
12575886d26fSLorenzo Bianconi xdp_return_frame(xdpf);
12585886d26fSLorenzo Bianconi }
1259155738a4SLorenzo Bianconi }
12605886d26fSLorenzo Bianconi tx_buf->flags = 0;
12615886d26fSLorenzo Bianconi tx_buf->data = NULL;
1262656e7052SJohn Crispin }
1263656e7052SJohn Crispin
1264296c9120SStefan Roese static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1265296c9120SStefan Roese struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1266296c9120SStefan Roese size_t size, int idx)
1267296c9120SStefan Roese {
1268296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1269296c9120SStefan Roese dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1270296c9120SStefan Roese dma_unmap_len_set(tx_buf, dma_len0, size);
1271296c9120SStefan Roese } else {
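		/* PDMA packs two buffers per descriptor: even entries use
		 * txd1/PLEN0, odd entries use txd3/PLEN1.
		 */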
1272296c9120SStefan Roese if (idx & 1) {
1273296c9120SStefan Roese txd->txd3 = mapped_addr;
1274296c9120SStefan Roese txd->txd2 |= TX_DMA_PLEN1(size);
1275296c9120SStefan Roese dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1276296c9120SStefan Roese dma_unmap_len_set(tx_buf, dma_len1, size);
1277296c9120SStefan Roese } else {
12785886d26fSLorenzo Bianconi tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1279296c9120SStefan Roese txd->txd1 = mapped_addr;
1280296c9120SStefan Roese txd->txd2 = TX_DMA_PLEN0(size);
1281296c9120SStefan Roese dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1282296c9120SStefan Roese dma_unmap_len_set(tx_buf, dma_len0, size);
1283296c9120SStefan Roese }
1284296c9120SStefan Roese }
1285296c9120SStefan Roese }
1286296c9120SStefan Roese
1287160d3a9bSLorenzo Bianconi static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1288731f3fd6SLorenzo Bianconi struct mtk_tx_dma_desc_info *info)
1289731f3fd6SLorenzo Bianconi {
1290731f3fd6SLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
1291160d3a9bSLorenzo Bianconi struct mtk_eth *eth = mac->hw;
1292160d3a9bSLorenzo Bianconi struct mtk_tx_dma *desc = txd;
1293731f3fd6SLorenzo Bianconi u32 data;
1294731f3fd6SLorenzo Bianconi
1295731f3fd6SLorenzo Bianconi WRITE_ONCE(desc->txd1, info->addr);
1296731f3fd6SLorenzo Bianconi
1297f63959c7SFelix Fietkau data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1298f63959c7SFelix Fietkau FIELD_PREP(TX_DMA_PQID, info->qid);
1299731f3fd6SLorenzo Bianconi if (info->last)
1300731f3fd6SLorenzo Bianconi data |= TX_DMA_LS0;
1301731f3fd6SLorenzo Bianconi WRITE_ONCE(desc->txd3, data);
1302731f3fd6SLorenzo Bianconi
1303731f3fd6SLorenzo Bianconi data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1304731f3fd6SLorenzo Bianconi if (info->first) {
1305731f3fd6SLorenzo Bianconi if (info->gso)
1306731f3fd6SLorenzo Bianconi data |= TX_DMA_TSO;
1307731f3fd6SLorenzo Bianconi /* tx checksum offload */
1308731f3fd6SLorenzo Bianconi if (info->csum)
1309731f3fd6SLorenzo Bianconi data |= TX_DMA_CHKSUM;
1310731f3fd6SLorenzo Bianconi /* vlan header offload */
1311731f3fd6SLorenzo Bianconi if (info->vlan)
1312731f3fd6SLorenzo Bianconi data |= TX_DMA_INS_VLAN | info->vlan_tci;
1313731f3fd6SLorenzo Bianconi }
1314731f3fd6SLorenzo Bianconi WRITE_ONCE(desc->txd4, data);
1315731f3fd6SLorenzo Bianconi }
1316731f3fd6SLorenzo Bianconi
1317160d3a9bSLorenzo Bianconi static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1318160d3a9bSLorenzo Bianconi struct mtk_tx_dma_desc_info *info)
1319160d3a9bSLorenzo Bianconi {
1320160d3a9bSLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
1321160d3a9bSLorenzo Bianconi struct mtk_tx_dma_v2 *desc = txd;
1322160d3a9bSLorenzo Bianconi struct mtk_eth *eth = mac->hw;
1323160d3a9bSLorenzo Bianconi u32 data;
1324160d3a9bSLorenzo Bianconi
1325160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd1, info->addr);
1326160d3a9bSLorenzo Bianconi
1327160d3a9bSLorenzo Bianconi data = TX_DMA_PLEN0(info->size);
1328160d3a9bSLorenzo Bianconi if (info->last)
1329160d3a9bSLorenzo Bianconi data |= TX_DMA_LS0;
13302d75891eSDaniel Golle
13312d75891eSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
13322d75891eSDaniel Golle data |= TX_DMA_PREP_ADDR64(info->addr);
13332d75891eSDaniel Golle
1334160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd3, data);
1335160d3a9bSLorenzo Bianconi
1336445eb644SLorenzo Bianconi /* set forward port */
1337445eb644SLorenzo Bianconi switch (mac->id) {
1338445eb644SLorenzo Bianconi case MTK_GMAC1_ID:
1339445eb644SLorenzo Bianconi data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1340445eb644SLorenzo Bianconi break;
1341445eb644SLorenzo Bianconi case MTK_GMAC2_ID:
1342445eb644SLorenzo Bianconi data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1343445eb644SLorenzo Bianconi break;
1344445eb644SLorenzo Bianconi case MTK_GMAC3_ID:
1345445eb644SLorenzo Bianconi data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1346445eb644SLorenzo Bianconi break;
1347445eb644SLorenzo Bianconi }
1348445eb644SLorenzo Bianconi
1349160d3a9bSLorenzo Bianconi data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1350160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd4, data);
1351160d3a9bSLorenzo Bianconi
1352160d3a9bSLorenzo Bianconi data = 0;
1353160d3a9bSLorenzo Bianconi if (info->first) {
1354160d3a9bSLorenzo Bianconi if (info->gso)
1355160d3a9bSLorenzo Bianconi data |= TX_DMA_TSO_V2;
1356160d3a9bSLorenzo Bianconi /* tx checksum offload */
1357160d3a9bSLorenzo Bianconi if (info->csum)
1358160d3a9bSLorenzo Bianconi data |= TX_DMA_CHKSUM_V2;
13591953f134SLorenzo Bianconi if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
13601953f134SLorenzo Bianconi data |= TX_DMA_SPTAG_V3;
1361160d3a9bSLorenzo Bianconi }
1362160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd5, data);
1363160d3a9bSLorenzo Bianconi
1364160d3a9bSLorenzo Bianconi data = 0;
1365160d3a9bSLorenzo Bianconi if (info->first && info->vlan)
1366160d3a9bSLorenzo Bianconi data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1367160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd6, data);
1368160d3a9bSLorenzo Bianconi
1369160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd7, 0);
1370160d3a9bSLorenzo Bianconi WRITE_ONCE(desc->txd8, 0);
1371160d3a9bSLorenzo Bianconi }
1372160d3a9bSLorenzo Bianconi
1373160d3a9bSLorenzo Bianconi static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1374160d3a9bSLorenzo Bianconi struct mtk_tx_dma_desc_info *info)
1375160d3a9bSLorenzo Bianconi {
1376160d3a9bSLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
1377160d3a9bSLorenzo Bianconi struct mtk_eth *eth = mac->hw;
1378160d3a9bSLorenzo Bianconi
1379a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth))
1380160d3a9bSLorenzo Bianconi mtk_tx_set_dma_desc_v2(dev, txd, info);
1381160d3a9bSLorenzo Bianconi else
1382160d3a9bSLorenzo Bianconi mtk_tx_set_dma_desc_v1(dev, txd, info);
1383160d3a9bSLorenzo Bianconi }
1384160d3a9bSLorenzo Bianconi
1385656e7052SJohn Crispin static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1386656e7052SJohn Crispin int tx_num, struct mtk_tx_ring *ring, bool gso)
1387656e7052SJohn Crispin {
1388731f3fd6SLorenzo Bianconi struct mtk_tx_dma_desc_info txd_info = {
1389731f3fd6SLorenzo Bianconi .size = skb_headlen(skb),
1390731f3fd6SLorenzo Bianconi .gso = gso,
1391731f3fd6SLorenzo Bianconi .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1392731f3fd6SLorenzo Bianconi .vlan = skb_vlan_tag_present(skb),
1393f63959c7SFelix Fietkau .qid = skb_get_queue_mapping(skb),
1394731f3fd6SLorenzo Bianconi .vlan_tci = skb_vlan_tag_get(skb),
1395731f3fd6SLorenzo Bianconi .first = true,
1396731f3fd6SLorenzo Bianconi .last = !skb_is_nonlinear(skb),
1397731f3fd6SLorenzo Bianconi };
1398f63959c7SFelix Fietkau struct netdev_queue *txq;
1399656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
1400656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
1401c4fd06c2SLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
1402656e7052SJohn Crispin struct mtk_tx_dma *itxd, *txd;
1403296c9120SStefan Roese struct mtk_tx_dma *itxd_pdma, *txd_pdma;
140481d2dd09SSean Wang struct mtk_tx_buf *itx_buf, *tx_buf;
1405656e7052SJohn Crispin int i, n_desc = 1;
1406f63959c7SFelix Fietkau int queue = skb_get_queue_mapping(skb);
1407296c9120SStefan Roese int k = 0;
1408656e7052SJohn Crispin
1409f63959c7SFelix Fietkau txq = netdev_get_tx_queue(dev, queue);
1410656e7052SJohn Crispin itxd = ring->next_free;
1411296c9120SStefan Roese itxd_pdma = qdma_to_pdma(ring, itxd);
1412656e7052SJohn Crispin if (itxd == ring->last_free)
1413656e7052SJohn Crispin return -ENOMEM;
1414656e7052SJohn Crispin
1415c4fd06c2SLorenzo Bianconi itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
141681d2dd09SSean Wang memset(itx_buf, 0, sizeof(*itx_buf));
1417656e7052SJohn Crispin
1418731f3fd6SLorenzo Bianconi txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1419731f3fd6SLorenzo Bianconi DMA_TO_DEVICE);
1420731f3fd6SLorenzo Bianconi if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1421656e7052SJohn Crispin return -ENOMEM;
1422656e7052SJohn Crispin
1423731f3fd6SLorenzo Bianconi mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1424731f3fd6SLorenzo Bianconi
142581d2dd09SSean Wang itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
14261953f134SLorenzo Bianconi itx_buf->mac_id = mac->id;
1427731f3fd6SLorenzo Bianconi setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1428296c9120SStefan Roese k++);
1429656e7052SJohn Crispin
1430656e7052SJohn Crispin /* TX SG offload */
1431656e7052SJohn Crispin txd = itxd;
1432296c9120SStefan Roese txd_pdma = qdma_to_pdma(ring, txd);
1433296c9120SStefan Roese
1434731f3fd6SLorenzo Bianconi for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1435d7840976SMatthew Wilcox (Oracle) skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1436656e7052SJohn Crispin unsigned int offset = 0;
1437656e7052SJohn Crispin int frag_size = skb_frag_size(frag);
1438656e7052SJohn Crispin
1439656e7052SJohn Crispin while (frag_size) {
1440296c9120SStefan Roese bool new_desc = true;
1441656e7052SJohn Crispin
1442c4fd06c2SLorenzo Bianconi if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1443296c9120SStefan Roese (i & 0x1)) {
1444656e7052SJohn Crispin txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1445296c9120SStefan Roese txd_pdma = qdma_to_pdma(ring, txd);
1446656e7052SJohn Crispin if (txd == ring->last_free)
1447656e7052SJohn Crispin goto err_dma;
1448656e7052SJohn Crispin
1449656e7052SJohn Crispin n_desc++;
1450296c9120SStefan Roese } else {
1451296c9120SStefan Roese new_desc = false;
1452296c9120SStefan Roese }
1453296c9120SStefan Roese
1454731f3fd6SLorenzo Bianconi memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1455160d3a9bSLorenzo Bianconi txd_info.size = min_t(unsigned int, frag_size,
1456160d3a9bSLorenzo Bianconi soc->txrx.dma_max_len);
1457f63959c7SFelix Fietkau txd_info.qid = queue;
1458731f3fd6SLorenzo Bianconi txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1459731f3fd6SLorenzo Bianconi !(frag_size - txd_info.size);
1460731f3fd6SLorenzo Bianconi txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1461731f3fd6SLorenzo Bianconi offset, txd_info.size,
1462656e7052SJohn Crispin DMA_TO_DEVICE);
1463731f3fd6SLorenzo Bianconi if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1464656e7052SJohn Crispin goto err_dma;
1465656e7052SJohn Crispin
1466731f3fd6SLorenzo Bianconi mtk_tx_set_dma_desc(dev, txd, &txd_info);
1467656e7052SJohn Crispin
1468c4fd06c2SLorenzo Bianconi tx_buf = mtk_desc_to_tx_buf(ring, txd,
1469c4fd06c2SLorenzo Bianconi soc->txrx.txd_size);
1470296c9120SStefan Roese if (new_desc)
1471656e7052SJohn Crispin memset(tx_buf, 0, sizeof(*tx_buf));
14725886d26fSLorenzo Bianconi tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1473656e7052SJohn Crispin tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
14741953f134SLorenzo Bianconi tx_buf->mac_id = mac->id;
1475134d2152SSean Wang
1476731f3fd6SLorenzo Bianconi setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1477731f3fd6SLorenzo Bianconi txd_info.size, k++);
1478296c9120SStefan Roese
1479731f3fd6SLorenzo Bianconi frag_size -= txd_info.size;
1480731f3fd6SLorenzo Bianconi offset += txd_info.size;
1481656e7052SJohn Crispin }
1482656e7052SJohn Crispin }
1483656e7052SJohn Crispin
1484656e7052SJohn Crispin /* store skb for cleanup */
14855886d26fSLorenzo Bianconi itx_buf->type = MTK_TYPE_SKB;
14865886d26fSLorenzo Bianconi itx_buf->data = skb;
1487656e7052SJohn Crispin
1488c4fd06c2SLorenzo Bianconi if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1489296c9120SStefan Roese if (k & 0x1)
1490296c9120SStefan Roese txd_pdma->txd2 |= TX_DMA_LS0;
1491296c9120SStefan Roese else
1492296c9120SStefan Roese txd_pdma->txd2 |= TX_DMA_LS1;
1493296c9120SStefan Roese }
1494656e7052SJohn Crispin
1495f63959c7SFelix Fietkau netdev_tx_sent_queue(txq, skb->len);
1496656e7052SJohn Crispin skb_tx_timestamp(skb);
1497656e7052SJohn Crispin
1498656e7052SJohn Crispin ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1499656e7052SJohn Crispin atomic_sub(n_desc, &ring->free_count);
1500656e7052SJohn Crispin
1501656e7052SJohn Crispin /* make sure that all changes to the dma ring are flushed before we
1502656e7052SJohn Crispin * continue
1503656e7052SJohn Crispin */
1504656e7052SJohn Crispin wmb();
1505656e7052SJohn Crispin
1506c4fd06c2SLorenzo Bianconi if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1507f63959c7SFelix Fietkau if (netif_xmit_stopped(txq) || !netdev_xmit_more())
15088cb42714SLorenzo Bianconi mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1509296c9120SStefan Roese } else {
1510e70a5634SLorenzo Bianconi int next_idx;
1511e70a5634SLorenzo Bianconi
1512e70a5634SLorenzo Bianconi next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1513296c9120SStefan Roese ring->dma_size);
1514296c9120SStefan Roese mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1515296c9120SStefan Roese }
1516656e7052SJohn Crispin
1517656e7052SJohn Crispin return 0;
1518656e7052SJohn Crispin
1519656e7052SJohn Crispin err_dma:
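	/* Unwind the descriptors claimed so far: unmap their buffers and
	 * hand ownership back to the CPU before reporting the failure.
	 */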
1520656e7052SJohn Crispin do {
1521c4fd06c2SLorenzo Bianconi tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1522656e7052SJohn Crispin
1523656e7052SJohn Crispin /* unmap dma */
1524853246dbSLorenzo Bianconi mtk_tx_unmap(eth, tx_buf, NULL, false);
1525656e7052SJohn Crispin
1526656e7052SJohn Crispin itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1527c4fd06c2SLorenzo Bianconi if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1528296c9120SStefan Roese itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1529296c9120SStefan Roese
1530656e7052SJohn Crispin itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1531296c9120SStefan Roese itxd_pdma = qdma_to_pdma(ring, itxd);
1532656e7052SJohn Crispin } while (itxd != txd);
1533656e7052SJohn Crispin
1534656e7052SJohn Crispin return -ENOMEM;
1535656e7052SJohn Crispin }
1536656e7052SJohn Crispin
1537160d3a9bSLorenzo Bianconi static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1538656e7052SJohn Crispin {
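	/* Worst-case descriptor count: for GSO, every fragment may need to
	 * be split into dma_max_len sized pieces.
	 */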
1539160d3a9bSLorenzo Bianconi int i, nfrags = 1;
1540d7840976SMatthew Wilcox (Oracle) skb_frag_t *frag;
1541656e7052SJohn Crispin
1542656e7052SJohn Crispin if (skb_is_gso(skb)) {
1543656e7052SJohn Crispin for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1544656e7052SJohn Crispin frag = &skb_shinfo(skb)->frags[i];
154592493a2fSMatthew Wilcox (Oracle) nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1546160d3a9bSLorenzo Bianconi eth->soc->txrx.dma_max_len);
1547656e7052SJohn Crispin }
1548656e7052SJohn Crispin } else {
1549656e7052SJohn Crispin nfrags += skb_shinfo(skb)->nr_frags;
1550656e7052SJohn Crispin }
1551656e7052SJohn Crispin
1552beeb4ca4SJohn Crispin return nfrags;
1553656e7052SJohn Crispin }
1554656e7052SJohn Crispin
1555ad3cba98SJohn Crispin static int mtk_queue_stopped(struct mtk_eth *eth)
1556ad3cba98SJohn Crispin {
1557ad3cba98SJohn Crispin int i;
1558ad3cba98SJohn Crispin
1559e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
1560ad3cba98SJohn Crispin if (!eth->netdev[i])
1561ad3cba98SJohn Crispin continue;
1562ad3cba98SJohn Crispin if (netif_queue_stopped(eth->netdev[i]))
1563ad3cba98SJohn Crispin return 1;
1564ad3cba98SJohn Crispin }
1565ad3cba98SJohn Crispin
1566ad3cba98SJohn Crispin return 0;
1567ad3cba98SJohn Crispin }
1568ad3cba98SJohn Crispin
156913c822f6SJohn Crispin static void mtk_wake_queue(struct mtk_eth *eth)
157013c822f6SJohn Crispin {
157113c822f6SJohn Crispin int i;
157213c822f6SJohn Crispin
1573e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
157413c822f6SJohn Crispin if (!eth->netdev[i])
157513c822f6SJohn Crispin continue;
1576f63959c7SFelix Fietkau netif_tx_wake_all_queues(eth->netdev[i]);
157713c822f6SJohn Crispin }
157813c822f6SJohn Crispin }
157913c822f6SJohn Crispin
1580e910a399SYunjian Wang static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1581656e7052SJohn Crispin {
1582656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
1583656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
1584656e7052SJohn Crispin struct mtk_tx_ring *ring = ð->tx_ring;
1585656e7052SJohn Crispin struct net_device_stats *stats = &dev->stats;
1586656e7052SJohn Crispin bool gso = false;
1587656e7052SJohn Crispin int tx_num;
1588656e7052SJohn Crispin
158934c2e4c9SJohn Crispin /* normally we can rely on the stack not calling this more than once;
159034c2e4c9SJohn Crispin * however, we have two queues running on the same ring, so we need to
159134c2e4c9SJohn Crispin * lock the ring access
159234c2e4c9SJohn Crispin */
1593e3e9652aSSean Wang spin_lock(ð->page_lock);
159434c2e4c9SJohn Crispin
1595dce6fa42SSean Wang if (unlikely(test_bit(MTK_RESETTING, ð->state)))
1596dce6fa42SSean Wang goto drop;
1597dce6fa42SSean Wang
1598160d3a9bSLorenzo Bianconi tx_num = mtk_cal_txd_req(eth, skb);
1599656e7052SJohn Crispin if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1600f63959c7SFelix Fietkau netif_tx_stop_all_queues(dev);
1601656e7052SJohn Crispin netif_err(eth, tx_queued, dev,
1602656e7052SJohn Crispin "Tx Ring full when queue awake!\n");
1603e3e9652aSSean Wang spin_unlock(ð->page_lock);
1604656e7052SJohn Crispin return NETDEV_TX_BUSY;
1605656e7052SJohn Crispin }
1606656e7052SJohn Crispin
1607656e7052SJohn Crispin /* TSO: fill MSS info in tcp checksum field */
1608656e7052SJohn Crispin if (skb_is_gso(skb)) {
1609656e7052SJohn Crispin if (skb_cow_head(skb, 0)) {
1610656e7052SJohn Crispin netif_warn(eth, tx_err, dev,
1611656e7052SJohn Crispin "GSO expand head fail.\n");
1612656e7052SJohn Crispin goto drop;
1613656e7052SJohn Crispin }
1614656e7052SJohn Crispin
1615656e7052SJohn Crispin if (skb_shinfo(skb)->gso_type &
1616656e7052SJohn Crispin (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1617656e7052SJohn Crispin gso = true;
1618656e7052SJohn Crispin tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1619656e7052SJohn Crispin }
1620656e7052SJohn Crispin }
1621656e7052SJohn Crispin
1622656e7052SJohn Crispin if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1623656e7052SJohn Crispin goto drop;
1624656e7052SJohn Crispin
162582c6544dSJohn Crispin if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1626f63959c7SFelix Fietkau netif_tx_stop_all_queues(dev);
162782c6544dSJohn Crispin
1628e3e9652aSSean Wang spin_unlock(ð->page_lock);
1629656e7052SJohn Crispin
1630656e7052SJohn Crispin return NETDEV_TX_OK;
1631656e7052SJohn Crispin
1632656e7052SJohn Crispin drop:
1633e3e9652aSSean Wang spin_unlock(ð->page_lock);
1634656e7052SJohn Crispin stats->tx_dropped++;
163581ad2b7dSWei Yongjun dev_kfree_skb_any(skb);
1636656e7052SJohn Crispin return NETDEV_TX_OK;
1637656e7052SJohn Crispin }
1638656e7052SJohn Crispin
1639ee406810SNelson Chang static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1640ee406810SNelson Chang {
1641ee406810SNelson Chang int i;
1642ee406810SNelson Chang struct mtk_rx_ring *ring;
1643ee406810SNelson Chang int idx;
1644ee406810SNelson Chang
1645ee406810SNelson Chang if (!eth->hwlro)
1646ee406810SNelson Chang return ð->rx_ring[0];
1647ee406810SNelson Chang
1648ee406810SNelson Chang for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1649649a7527SLorenzo Bianconi struct mtk_rx_dma *rxd;
1650649a7527SLorenzo Bianconi
1651ee406810SNelson Chang ring = ð->rx_ring[i];
165208df5fa6SStefan Roese idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
16537173eca8SLorenzo Bianconi rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1654649a7527SLorenzo Bianconi if (rxd->rxd2 & RX_DMA_DONE) {
1655ee406810SNelson Chang ring->calc_idx_update = true;
1656ee406810SNelson Chang return ring;
1657ee406810SNelson Chang }
1658ee406810SNelson Chang }
1659ee406810SNelson Chang
1660ee406810SNelson Chang return NULL;
1661ee406810SNelson Chang }
1662ee406810SNelson Chang
1663ee406810SNelson Chang static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1664ee406810SNelson Chang {
1665ee406810SNelson Chang struct mtk_rx_ring *ring;
1666ee406810SNelson Chang int i;
1667ee406810SNelson Chang
1668ee406810SNelson Chang if (!eth->hwlro) {
1669ee406810SNelson Chang ring = ð->rx_ring[0];
1670ee406810SNelson Chang mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1671ee406810SNelson Chang } else {
1672ee406810SNelson Chang for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1673ee406810SNelson Chang ring = ð->rx_ring[i];
1674ee406810SNelson Chang if (ring->calc_idx_update) {
1675ee406810SNelson Chang ring->calc_idx_update = false;
1676ee406810SNelson Chang mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1677ee406810SNelson Chang }
1678ee406810SNelson Chang }
1679ee406810SNelson Chang }
1680ee406810SNelson Chang }
1681ee406810SNelson Chang
16827c26c20dSLorenzo Bianconi static bool mtk_page_pool_enabled(struct mtk_eth *eth)
16837c26c20dSLorenzo Bianconi {
168458ea461bSLorenzo Bianconi return mtk_is_netsys_v2_or_greater(eth);
16857c26c20dSLorenzo Bianconi }
16867c26c20dSLorenzo Bianconi
168723233e57SLorenzo Bianconi static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
168823233e57SLorenzo Bianconi struct xdp_rxq_info *xdp_q,
168923233e57SLorenzo Bianconi int id, int size)
169023233e57SLorenzo Bianconi {
169123233e57SLorenzo Bianconi struct page_pool_params pp_params = {
169223233e57SLorenzo Bianconi .order = 0,
169323233e57SLorenzo Bianconi .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
169423233e57SLorenzo Bianconi .pool_size = size,
169523233e57SLorenzo Bianconi .nid = NUMA_NO_NODE,
169623233e57SLorenzo Bianconi .dev = eth->dma_dev,
169723233e57SLorenzo Bianconi .offset = MTK_PP_HEADROOM,
169823233e57SLorenzo Bianconi .max_len = MTK_PP_MAX_BUF_SIZE,
169923233e57SLorenzo Bianconi };
170023233e57SLorenzo Bianconi struct page_pool *pp;
170123233e57SLorenzo Bianconi int err;
170223233e57SLorenzo Bianconi
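	/* With an XDP program attached the pool maps buffers bidirectionally
	 * so XDP_TX can transmit them without remapping.
	 */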
17035886d26fSLorenzo Bianconi pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
17045886d26fSLorenzo Bianconi : DMA_FROM_DEVICE;
170523233e57SLorenzo Bianconi pp = page_pool_create(&pp_params);
170623233e57SLorenzo Bianconi if (IS_ERR(pp))
170723233e57SLorenzo Bianconi return pp;
170823233e57SLorenzo Bianconi
1709c966153dSTariq Toukan err = __xdp_rxq_info_reg(xdp_q, ð->dummy_dev, id,
1710c966153dSTariq Toukan eth->rx_napi.napi_id, PAGE_SIZE);
171123233e57SLorenzo Bianconi if (err < 0)
171223233e57SLorenzo Bianconi goto err_free_pp;
171323233e57SLorenzo Bianconi
171423233e57SLorenzo Bianconi err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
171523233e57SLorenzo Bianconi if (err)
171623233e57SLorenzo Bianconi goto err_unregister_rxq;
171723233e57SLorenzo Bianconi
171823233e57SLorenzo Bianconi return pp;
171923233e57SLorenzo Bianconi
172023233e57SLorenzo Bianconi err_unregister_rxq:
172123233e57SLorenzo Bianconi xdp_rxq_info_unreg(xdp_q);
172223233e57SLorenzo Bianconi err_free_pp:
172323233e57SLorenzo Bianconi page_pool_destroy(pp);
172423233e57SLorenzo Bianconi
172523233e57SLorenzo Bianconi return ERR_PTR(err);
172623233e57SLorenzo Bianconi }
172723233e57SLorenzo Bianconi
172823233e57SLorenzo Bianconi static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
172923233e57SLorenzo Bianconi gfp_t gfp_mask)
173023233e57SLorenzo Bianconi {
173123233e57SLorenzo Bianconi struct page *page;
173223233e57SLorenzo Bianconi
173323233e57SLorenzo Bianconi page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
173423233e57SLorenzo Bianconi if (!page)
173523233e57SLorenzo Bianconi return NULL;
173623233e57SLorenzo Bianconi
173723233e57SLorenzo Bianconi *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
173823233e57SLorenzo Bianconi return page_address(page);
173923233e57SLorenzo Bianconi }
174023233e57SLorenzo Bianconi
174123233e57SLorenzo Bianconi static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
174223233e57SLorenzo Bianconi {
174323233e57SLorenzo Bianconi if (ring->page_pool)
174423233e57SLorenzo Bianconi page_pool_put_full_page(ring->page_pool,
174523233e57SLorenzo Bianconi virt_to_head_page(data), napi);
174623233e57SLorenzo Bianconi else
174723233e57SLorenzo Bianconi skb_free_frag(data);
174823233e57SLorenzo Bianconi }
174923233e57SLorenzo Bianconi
1750b16fe6d8SLorenzo Bianconi static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1751b16fe6d8SLorenzo Bianconi struct mtk_tx_dma_desc_info *txd_info,
1752b16fe6d8SLorenzo Bianconi struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1753b16fe6d8SLorenzo Bianconi void *data, u16 headroom, int index, bool dma_map)
1754b16fe6d8SLorenzo Bianconi {
1755b16fe6d8SLorenzo Bianconi struct mtk_tx_ring *ring = ð->tx_ring;
1756b16fe6d8SLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
1757b16fe6d8SLorenzo Bianconi struct mtk_tx_dma *txd_pdma;
1758b16fe6d8SLorenzo Bianconi
1759b16fe6d8SLorenzo Bianconi if (dma_map) { /* ndo_xdp_xmit */
1760b16fe6d8SLorenzo Bianconi txd_info->addr = dma_map_single(eth->dma_dev, data,
1761b16fe6d8SLorenzo Bianconi txd_info->size, DMA_TO_DEVICE);
1762b16fe6d8SLorenzo Bianconi if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1763b16fe6d8SLorenzo Bianconi return -ENOMEM;
1764b16fe6d8SLorenzo Bianconi
1765b16fe6d8SLorenzo Bianconi tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1766b16fe6d8SLorenzo Bianconi } else {
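		/* XDP_TX: the buffer already lives in the page pool, so only a
		 * cache sync towards the device is needed, not a fresh mapping.
		 */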
1767b16fe6d8SLorenzo Bianconi struct page *page = virt_to_head_page(data);
1768b16fe6d8SLorenzo Bianconi
1769b16fe6d8SLorenzo Bianconi txd_info->addr = page_pool_get_dma_addr(page) +
1770b16fe6d8SLorenzo Bianconi sizeof(struct xdp_frame) + headroom;
1771b16fe6d8SLorenzo Bianconi dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1772b16fe6d8SLorenzo Bianconi txd_info->size, DMA_BIDIRECTIONAL);
1773b16fe6d8SLorenzo Bianconi }
1774b16fe6d8SLorenzo Bianconi mtk_tx_set_dma_desc(dev, txd, txd_info);
1775b16fe6d8SLorenzo Bianconi
17761953f134SLorenzo Bianconi tx_buf->mac_id = mac->id;
1777155738a4SLorenzo Bianconi tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1778155738a4SLorenzo Bianconi tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1779b16fe6d8SLorenzo Bianconi
1780b16fe6d8SLorenzo Bianconi txd_pdma = qdma_to_pdma(ring, txd);
1781b16fe6d8SLorenzo Bianconi setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1782b16fe6d8SLorenzo Bianconi index);
1783b16fe6d8SLorenzo Bianconi
1784b16fe6d8SLorenzo Bianconi return 0;
1785b16fe6d8SLorenzo Bianconi }
1786b16fe6d8SLorenzo Bianconi
17875886d26fSLorenzo Bianconi static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
17885886d26fSLorenzo Bianconi struct net_device *dev, bool dma_map)
17895886d26fSLorenzo Bianconi {
1790155738a4SLorenzo Bianconi struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
17915886d26fSLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
17925886d26fSLorenzo Bianconi struct mtk_tx_ring *ring = ð->tx_ring;
1793f63959c7SFelix Fietkau struct mtk_mac *mac = netdev_priv(dev);
17945886d26fSLorenzo Bianconi struct mtk_tx_dma_desc_info txd_info = {
17955886d26fSLorenzo Bianconi .size = xdpf->len,
17965886d26fSLorenzo Bianconi .first = true,
1797155738a4SLorenzo Bianconi .last = !xdp_frame_has_frags(xdpf),
1798f63959c7SFelix Fietkau .qid = mac->id,
17995886d26fSLorenzo Bianconi };
1800155738a4SLorenzo Bianconi int err, index = 0, n_desc = 1, nr_frags;
1801155738a4SLorenzo Bianconi struct mtk_tx_buf *htx_buf, *tx_buf;
1802a64bb2b0SLorenzo Bianconi struct mtk_tx_dma *htxd, *txd;
1803155738a4SLorenzo Bianconi void *data = xdpf->data;
18045886d26fSLorenzo Bianconi
18055886d26fSLorenzo Bianconi if (unlikely(test_bit(MTK_RESETTING, ð->state)))
18065886d26fSLorenzo Bianconi return -EBUSY;
18075886d26fSLorenzo Bianconi
1808155738a4SLorenzo Bianconi nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1809155738a4SLorenzo Bianconi if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
18105886d26fSLorenzo Bianconi return -EBUSY;
18115886d26fSLorenzo Bianconi
18125886d26fSLorenzo Bianconi spin_lock(ð->page_lock);
18135886d26fSLorenzo Bianconi
18145886d26fSLorenzo Bianconi txd = ring->next_free;
18155886d26fSLorenzo Bianconi if (txd == ring->last_free) {
1816155738a4SLorenzo Bianconi spin_unlock(ð->page_lock);
1817155738a4SLorenzo Bianconi return -ENOMEM;
18185886d26fSLorenzo Bianconi }
1819155738a4SLorenzo Bianconi htxd = txd;
18205886d26fSLorenzo Bianconi
18215886d26fSLorenzo Bianconi tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
18225886d26fSLorenzo Bianconi memset(tx_buf, 0, sizeof(*tx_buf));
1823155738a4SLorenzo Bianconi htx_buf = tx_buf;
18245886d26fSLorenzo Bianconi
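	/* Map the head buffer first, then walk any frags; each pass claims
	 * the next free descriptor until the last segment has been set up.
	 */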
1825155738a4SLorenzo Bianconi for (;;) {
1826b16fe6d8SLorenzo Bianconi err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1827155738a4SLorenzo Bianconi data, xdpf->headroom, index, dma_map);
1828b16fe6d8SLorenzo Bianconi if (err < 0)
1829155738a4SLorenzo Bianconi goto unmap;
18305886d26fSLorenzo Bianconi
1831155738a4SLorenzo Bianconi if (txd_info.last)
1832155738a4SLorenzo Bianconi break;
1833155738a4SLorenzo Bianconi
1834155738a4SLorenzo Bianconi if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1835155738a4SLorenzo Bianconi txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1836155738a4SLorenzo Bianconi if (txd == ring->last_free)
1837155738a4SLorenzo Bianconi goto unmap;
1838155738a4SLorenzo Bianconi
1839155738a4SLorenzo Bianconi tx_buf = mtk_desc_to_tx_buf(ring, txd,
1840155738a4SLorenzo Bianconi soc->txrx.txd_size);
1841155738a4SLorenzo Bianconi memset(tx_buf, 0, sizeof(*tx_buf));
1842155738a4SLorenzo Bianconi n_desc++;
1843155738a4SLorenzo Bianconi }
1844155738a4SLorenzo Bianconi
1845155738a4SLorenzo Bianconi memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1846155738a4SLorenzo Bianconi txd_info.size = skb_frag_size(&sinfo->frags[index]);
1847155738a4SLorenzo Bianconi txd_info.last = index + 1 == nr_frags;
1848f63959c7SFelix Fietkau txd_info.qid = mac->id;
1849155738a4SLorenzo Bianconi data = skb_frag_address(&sinfo->frags[index]);
1850155738a4SLorenzo Bianconi
1851155738a4SLorenzo Bianconi index++;
1852155738a4SLorenzo Bianconi }
18535886d26fSLorenzo Bianconi /* store xdpf for cleanup */
1854155738a4SLorenzo Bianconi htx_buf->data = xdpf;
18555886d26fSLorenzo Bianconi
18565886d26fSLorenzo Bianconi if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1857a64bb2b0SLorenzo Bianconi struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1858a64bb2b0SLorenzo Bianconi
18595886d26fSLorenzo Bianconi if (index & 1)
18605886d26fSLorenzo Bianconi txd_pdma->txd2 |= TX_DMA_LS0;
18615886d26fSLorenzo Bianconi else
18625886d26fSLorenzo Bianconi txd_pdma->txd2 |= TX_DMA_LS1;
18635886d26fSLorenzo Bianconi }
18645886d26fSLorenzo Bianconi
18655886d26fSLorenzo Bianconi ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
18665886d26fSLorenzo Bianconi atomic_sub(n_desc, &ring->free_count);
18675886d26fSLorenzo Bianconi
18685886d26fSLorenzo Bianconi /* make sure that all changes to the dma ring are flushed before we
18695886d26fSLorenzo Bianconi * continue
18705886d26fSLorenzo Bianconi */
18715886d26fSLorenzo Bianconi wmb();
18725886d26fSLorenzo Bianconi
18735886d26fSLorenzo Bianconi if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
18745886d26fSLorenzo Bianconi mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
18755886d26fSLorenzo Bianconi } else {
18765886d26fSLorenzo Bianconi int idx;
18775886d26fSLorenzo Bianconi
18785886d26fSLorenzo Bianconi idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
18795886d26fSLorenzo Bianconi mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
18805886d26fSLorenzo Bianconi MT7628_TX_CTX_IDX0);
18815886d26fSLorenzo Bianconi }
1882155738a4SLorenzo Bianconi
1883155738a4SLorenzo Bianconi spin_unlock(ð->page_lock);
1884155738a4SLorenzo Bianconi
1885155738a4SLorenzo Bianconi return 0;
1886155738a4SLorenzo Bianconi
1887155738a4SLorenzo Bianconi unmap:
1888155738a4SLorenzo Bianconi while (htxd != txd) {
1889155738a4SLorenzo Bianconi tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1890853246dbSLorenzo Bianconi mtk_tx_unmap(eth, tx_buf, NULL, false);
1891155738a4SLorenzo Bianconi
1892155738a4SLorenzo Bianconi htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1893a64bb2b0SLorenzo Bianconi if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1894a64bb2b0SLorenzo Bianconi struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1895a64bb2b0SLorenzo Bianconi
1896155738a4SLorenzo Bianconi txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1897a64bb2b0SLorenzo Bianconi }
1898155738a4SLorenzo Bianconi
1899155738a4SLorenzo Bianconi htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1900155738a4SLorenzo Bianconi }
1901155738a4SLorenzo Bianconi
19025886d26fSLorenzo Bianconi spin_unlock(ð->page_lock);
19035886d26fSLorenzo Bianconi
19045886d26fSLorenzo Bianconi return err;
19055886d26fSLorenzo Bianconi }
19065886d26fSLorenzo Bianconi
19075886d26fSLorenzo Bianconi static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
19085886d26fSLorenzo Bianconi struct xdp_frame **frames, u32 flags)
19095886d26fSLorenzo Bianconi {
19105886d26fSLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
19115886d26fSLorenzo Bianconi struct mtk_hw_stats *hw_stats = mac->hw_stats;
19125886d26fSLorenzo Bianconi struct mtk_eth *eth = mac->hw;
19135886d26fSLorenzo Bianconi int i, nxmit = 0;
19145886d26fSLorenzo Bianconi
19155886d26fSLorenzo Bianconi if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
19165886d26fSLorenzo Bianconi return -EINVAL;
19175886d26fSLorenzo Bianconi
19185886d26fSLorenzo Bianconi for (i = 0; i < num_frame; i++) {
19195886d26fSLorenzo Bianconi if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
19205886d26fSLorenzo Bianconi break;
19215886d26fSLorenzo Bianconi nxmit++;
19225886d26fSLorenzo Bianconi }
19235886d26fSLorenzo Bianconi
19245886d26fSLorenzo Bianconi u64_stats_update_begin(&hw_stats->syncp);
19255886d26fSLorenzo Bianconi hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
19265886d26fSLorenzo Bianconi hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
19275886d26fSLorenzo Bianconi u64_stats_update_end(&hw_stats->syncp);
19285886d26fSLorenzo Bianconi
19295886d26fSLorenzo Bianconi return nxmit;
19305886d26fSLorenzo Bianconi }
19315886d26fSLorenzo Bianconi
19327c26c20dSLorenzo Bianconi static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
19337c26c20dSLorenzo Bianconi struct xdp_buff *xdp, struct net_device *dev)
19347c26c20dSLorenzo Bianconi {
1935916a6ee8SLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
1936916a6ee8SLorenzo Bianconi struct mtk_hw_stats *hw_stats = mac->hw_stats;
1937916a6ee8SLorenzo Bianconi u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
19387c26c20dSLorenzo Bianconi struct bpf_prog *prog;
19397c26c20dSLorenzo Bianconi u32 act = XDP_PASS;
19407c26c20dSLorenzo Bianconi
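	/* No program attached means XDP_PASS; otherwise run the program
	 * under RCU and act on its verdict.
	 */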
19417c26c20dSLorenzo Bianconi rcu_read_lock();
19427c26c20dSLorenzo Bianconi
19437c26c20dSLorenzo Bianconi prog = rcu_dereference(eth->prog);
19447c26c20dSLorenzo Bianconi if (!prog)
19457c26c20dSLorenzo Bianconi goto out;
19467c26c20dSLorenzo Bianconi
19477c26c20dSLorenzo Bianconi act = bpf_prog_run_xdp(prog, xdp);
19487c26c20dSLorenzo Bianconi switch (act) {
19497c26c20dSLorenzo Bianconi case XDP_PASS:
1950916a6ee8SLorenzo Bianconi count = &hw_stats->xdp_stats.rx_xdp_pass;
1951916a6ee8SLorenzo Bianconi goto update_stats;
19527c26c20dSLorenzo Bianconi case XDP_REDIRECT:
19537c26c20dSLorenzo Bianconi if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
19547c26c20dSLorenzo Bianconi act = XDP_DROP;
19557c26c20dSLorenzo Bianconi break;
19567c26c20dSLorenzo Bianconi }
1957916a6ee8SLorenzo Bianconi
1958916a6ee8SLorenzo Bianconi count = &hw_stats->xdp_stats.rx_xdp_redirect;
1959916a6ee8SLorenzo Bianconi goto update_stats;
19605886d26fSLorenzo Bianconi case XDP_TX: {
19615886d26fSLorenzo Bianconi struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
19625886d26fSLorenzo Bianconi
1963a617ccc0SLorenzo Bianconi if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
19645886d26fSLorenzo Bianconi count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
19655886d26fSLorenzo Bianconi act = XDP_DROP;
19665886d26fSLorenzo Bianconi break;
19675886d26fSLorenzo Bianconi }
19685886d26fSLorenzo Bianconi
19695886d26fSLorenzo Bianconi count = &hw_stats->xdp_stats.rx_xdp_tx;
19705886d26fSLorenzo Bianconi goto update_stats;
19715886d26fSLorenzo Bianconi }
19727c26c20dSLorenzo Bianconi default:
19737c26c20dSLorenzo Bianconi bpf_warn_invalid_xdp_action(dev, prog, act);
19747c26c20dSLorenzo Bianconi fallthrough;
19757c26c20dSLorenzo Bianconi case XDP_ABORTED:
19767c26c20dSLorenzo Bianconi trace_xdp_exception(dev, prog, act);
19777c26c20dSLorenzo Bianconi fallthrough;
19787c26c20dSLorenzo Bianconi case XDP_DROP:
19797c26c20dSLorenzo Bianconi break;
19807c26c20dSLorenzo Bianconi }
19817c26c20dSLorenzo Bianconi
19827c26c20dSLorenzo Bianconi page_pool_put_full_page(ring->page_pool,
19837c26c20dSLorenzo Bianconi virt_to_head_page(xdp->data), true);
1984916a6ee8SLorenzo Bianconi
1985916a6ee8SLorenzo Bianconi update_stats:
1986916a6ee8SLorenzo Bianconi u64_stats_update_begin(&hw_stats->syncp);
1987916a6ee8SLorenzo Bianconi *count = *count + 1;
1988916a6ee8SLorenzo Bianconi u64_stats_update_end(&hw_stats->syncp);
19897c26c20dSLorenzo Bianconi out:
19907c26c20dSLorenzo Bianconi rcu_read_unlock();
19917c26c20dSLorenzo Bianconi
19927c26c20dSLorenzo Bianconi return act;
19937c26c20dSLorenzo Bianconi }
19947c26c20dSLorenzo Bianconi
1995656e7052SJohn Crispin static int mtk_poll_rx(struct napi_struct *napi, int budget,
1996eece71e8SJohn Crispin struct mtk_eth *eth)
1997656e7052SJohn Crispin {
1998e9229ffdSFelix Fietkau struct dim_sample dim_sample = {};
1999ee406810SNelson Chang struct mtk_rx_ring *ring;
20007c26c20dSLorenzo Bianconi bool xdp_flush = false;
2001ee406810SNelson Chang int idx;
2002656e7052SJohn Crispin struct sk_buff *skb;
20032d75891eSDaniel Golle u64 addr64 = 0;
2004656e7052SJohn Crispin u8 *data, *new_data;
2005160d3a9bSLorenzo Bianconi struct mtk_rx_dma_v2 *rxd, trxd;
2006e9229ffdSFelix Fietkau int done = 0, bytes = 0;
2007e10a35abSDaniel Golle dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2008656e7052SJohn Crispin
2009656e7052SJohn Crispin while (done < budget) {
2010da6e113fSLorenzo Bianconi unsigned int pktlen, *rxdcsum;
2011656e7052SJohn Crispin struct net_device *netdev;
20127c26c20dSLorenzo Bianconi u32 hash, reason;
2013160d3a9bSLorenzo Bianconi int mac = 0;
2014656e7052SJohn Crispin
2015ee406810SNelson Chang ring = mtk_get_rx_ring(eth);
2016ee406810SNelson Chang if (unlikely(!ring))
2017ee406810SNelson Chang goto rx_done;
2018ee406810SNelson Chang
201908df5fa6SStefan Roese idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
20207173eca8SLorenzo Bianconi rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
2021656e7052SJohn Crispin data = ring->data[idx];
2022656e7052SJohn Crispin
2023160d3a9bSLorenzo Bianconi if (!mtk_rx_get_desc(eth, &trxd, rxd))
2024656e7052SJohn Crispin break;
2025656e7052SJohn Crispin
2026656e7052SJohn Crispin /* find out which mac the packet came from. values start at 1 */
20271953f134SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
20281953f134SLorenzo Bianconi u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
20291953f134SLorenzo Bianconi
20301953f134SLorenzo Bianconi switch (val) {
20311953f134SLorenzo Bianconi case PSE_GDM1_PORT:
20321953f134SLorenzo Bianconi case PSE_GDM2_PORT:
20331953f134SLorenzo Bianconi mac = val - 1;
20341953f134SLorenzo Bianconi break;
20351953f134SLorenzo Bianconi case PSE_GDM3_PORT:
20361953f134SLorenzo Bianconi mac = MTK_GMAC3_ID;
20371953f134SLorenzo Bianconi break;
20381953f134SLorenzo Bianconi default:
20391953f134SLorenzo Bianconi break;
20401953f134SLorenzo Bianconi }
20411953f134SLorenzo Bianconi } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
20421953f134SLorenzo Bianconi !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2043160d3a9bSLorenzo Bianconi mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
20441953f134SLorenzo Bianconi }
2045656e7052SJohn Crispin
2046e05fd627SLorenzo Bianconi if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
20476c7fce6fSSean Wang !eth->netdev[mac]))
20486c7fce6fSSean Wang goto release_desc;
20496c7fce6fSSean Wang
2050656e7052SJohn Crispin netdev = eth->netdev[mac];
2051656e7052SJohn Crispin
2052dce6fa42SSean Wang if (unlikely(test_bit(MTK_RESETTING, ð->state)))
2053dce6fa42SSean Wang goto release_desc;
2054dce6fa42SSean Wang
20557c26c20dSLorenzo Bianconi pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
20567c26c20dSLorenzo Bianconi
2057656e7052SJohn Crispin /* alloc new buffer */
205823233e57SLorenzo Bianconi if (ring->page_pool) {
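			/* Page-pool path: buffers come from the recycling page
			 * pool and can be handed to an attached XDP program
			 * before an skb is built.
			 */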
20597c26c20dSLorenzo Bianconi struct page *page = virt_to_head_page(data);
20607c26c20dSLorenzo Bianconi struct xdp_buff xdp;
20617c26c20dSLorenzo Bianconi u32 ret;
20627c26c20dSLorenzo Bianconi
206323233e57SLorenzo Bianconi new_data = mtk_page_pool_get_buff(ring->page_pool,
206423233e57SLorenzo Bianconi &dma_addr,
206523233e57SLorenzo Bianconi GFP_ATOMIC);
2066656e7052SJohn Crispin if (unlikely(!new_data)) {
2067656e7052SJohn Crispin netdev->stats.rx_dropped++;
2068656e7052SJohn Crispin goto release_desc;
2069656e7052SJohn Crispin }
20707c26c20dSLorenzo Bianconi
20717c26c20dSLorenzo Bianconi dma_sync_single_for_cpu(eth->dma_dev,
20727c26c20dSLorenzo Bianconi page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
20737c26c20dSLorenzo Bianconi pktlen, page_pool_get_dma_dir(ring->page_pool));
20747c26c20dSLorenzo Bianconi
20757c26c20dSLorenzo Bianconi xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
20767c26c20dSLorenzo Bianconi xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
20777c26c20dSLorenzo Bianconi false);
20787c26c20dSLorenzo Bianconi xdp_buff_clear_frags_flag(&xdp);
20797c26c20dSLorenzo Bianconi
20807c26c20dSLorenzo Bianconi ret = mtk_xdp_run(eth, ring, &xdp, netdev);
20817c26c20dSLorenzo Bianconi if (ret == XDP_REDIRECT)
20827c26c20dSLorenzo Bianconi xdp_flush = true;
20837c26c20dSLorenzo Bianconi
20847c26c20dSLorenzo Bianconi if (ret != XDP_PASS)
20857c26c20dSLorenzo Bianconi goto skip_rx;
20867c26c20dSLorenzo Bianconi
20877c26c20dSLorenzo Bianconi skb = build_skb(data, PAGE_SIZE);
20887c26c20dSLorenzo Bianconi if (unlikely(!skb)) {
20897c26c20dSLorenzo Bianconi page_pool_put_full_page(ring->page_pool,
20907c26c20dSLorenzo Bianconi page, true);
20917c26c20dSLorenzo Bianconi netdev->stats.rx_dropped++;
20927c26c20dSLorenzo Bianconi goto skip_rx;
20937c26c20dSLorenzo Bianconi }
20947c26c20dSLorenzo Bianconi
20957c26c20dSLorenzo Bianconi skb_reserve(skb, xdp.data - xdp.data_hard_start);
20967c26c20dSLorenzo Bianconi skb_put(skb, xdp.data_end - xdp.data);
20977c26c20dSLorenzo Bianconi skb_mark_for_recycle(skb);
209823233e57SLorenzo Bianconi } else {
209923233e57SLorenzo Bianconi if (ring->frag_size <= PAGE_SIZE)
210023233e57SLorenzo Bianconi new_data = napi_alloc_frag(ring->frag_size);
210123233e57SLorenzo Bianconi else
210223233e57SLorenzo Bianconi new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
210323233e57SLorenzo Bianconi
210423233e57SLorenzo Bianconi if (unlikely(!new_data)) {
210523233e57SLorenzo Bianconi netdev->stats.rx_dropped++;
210623233e57SLorenzo Bianconi goto release_desc;
210723233e57SLorenzo Bianconi }
210823233e57SLorenzo Bianconi
2109d776a57eSFelix Fietkau dma_addr = dma_map_single(eth->dma_dev,
211023233e57SLorenzo Bianconi new_data + NET_SKB_PAD + eth->ip_align,
211123233e57SLorenzo Bianconi ring->buf_size, DMA_FROM_DEVICE);
211223233e57SLorenzo Bianconi if (unlikely(dma_mapping_error(eth->dma_dev,
211323233e57SLorenzo Bianconi dma_addr))) {
2114656e7052SJohn Crispin skb_free_frag(new_data);
211594321a9fSJohn Crispin netdev->stats.rx_dropped++;
2116656e7052SJohn Crispin goto release_desc;
2117656e7052SJohn Crispin }
2118656e7052SJohn Crispin
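			/* On SoCs with 36-bit DMA the high address bits are
			 * carried in rxd2 and must be folded back in before
			 * unmapping the old buffer.
			 */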
21192d75891eSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
21202d75891eSDaniel Golle addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
21212d75891eSDaniel Golle
21222d75891eSDaniel Golle dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
21235196c417SFelix Fietkau ring->buf_size, DMA_FROM_DEVICE);
21245196c417SFelix Fietkau
2125656e7052SJohn Crispin skb = build_skb(data, ring->frag_size);
2126656e7052SJohn Crispin if (unlikely(!skb)) {
212794321a9fSJohn Crispin netdev->stats.rx_dropped++;
21287c26c20dSLorenzo Bianconi skb_free_frag(data);
2129787082abSIlya Lipnitskiy goto skip_rx;
2130656e7052SJohn Crispin }
213123233e57SLorenzo Bianconi
21327c26c20dSLorenzo Bianconi skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2133656e7052SJohn Crispin skb_put(skb, pktlen);
21347c26c20dSLorenzo Bianconi }
21357c26c20dSLorenzo Bianconi
21367c26c20dSLorenzo Bianconi skb->dev = netdev;
21377c26c20dSLorenzo Bianconi bytes += skb->len;
2138da6e113fSLorenzo Bianconi
2139a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
214003a3180eSLorenzo Bianconi reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
21410cf731f9SLorenzo Bianconi hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
21420cf731f9SLorenzo Bianconi if (hash != MTK_RXD5_FOE_ENTRY)
21430cf731f9SLorenzo Bianconi skb_set_hash(skb, jhash_1word(hash, 0),
21440cf731f9SLorenzo Bianconi PKT_HASH_TYPE_L4);
2145da6e113fSLorenzo Bianconi rxdcsum = &trxd.rxd3;
21460cf731f9SLorenzo Bianconi } else {
214703a3180eSLorenzo Bianconi reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
21480cf731f9SLorenzo Bianconi hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
21490cf731f9SLorenzo Bianconi if (hash != MTK_RXD4_FOE_ENTRY)
21500cf731f9SLorenzo Bianconi skb_set_hash(skb, jhash_1word(hash, 0),
21510cf731f9SLorenzo Bianconi PKT_HASH_TYPE_L4);
2152da6e113fSLorenzo Bianconi rxdcsum = &trxd.rxd4;
21530cf731f9SLorenzo Bianconi }
2154da6e113fSLorenzo Bianconi
2155da6e113fSLorenzo Bianconi if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2156656e7052SJohn Crispin skb->ip_summed = CHECKSUM_UNNECESSARY;
2157656e7052SJohn Crispin else
2158656e7052SJohn Crispin skb_checksum_none_assert(skb);
2159656e7052SJohn Crispin skb->protocol = eth_type_trans(skb, netdev);
2160656e7052SJohn Crispin
21612d7605a7SFelix Fietkau /* When using VLAN untagging in combination with DSA, the
21622d7605a7SFelix Fietkau * hardware treats the MTK special tag as a VLAN and untags it.
2163160d3a9bSLorenzo Bianconi */
2164a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2165a008e2a8SLorenzo Bianconi netdev_uses_dsa(netdev)) {
2166c6d96df9SFelix Fietkau unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
21672d7605a7SFelix Fietkau
21682d7605a7SFelix Fietkau if (port < ARRAY_SIZE(eth->dsa_meta) &&
21692d7605a7SFelix Fietkau eth->dsa_meta[port])
21702d7605a7SFelix Fietkau skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2171160d3a9bSLorenzo Bianconi }
2172160d3a9bSLorenzo Bianconi
21735f36ca1bSFelix Fietkau if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
21745f36ca1bSFelix Fietkau mtk_ppe_check_skb(eth->ppe[0], skb, hash);
21755f36ca1bSFelix Fietkau
2176a2d5e7b4SJohn Crispin skb_record_rx_queue(skb, 0);
2177656e7052SJohn Crispin napi_gro_receive(napi, skb);
2178656e7052SJohn Crispin
2179787082abSIlya Lipnitskiy skip_rx:
2180656e7052SJohn Crispin ring->data[idx] = new_data;
2181656e7052SJohn Crispin rxd->rxd1 = (unsigned int)dma_addr;
2182656e7052SJohn Crispin release_desc:
2183296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2184296c9120SStefan Roese rxd->rxd2 = RX_DMA_LSO;
2185296c9120SStefan Roese else
2186160d3a9bSLorenzo Bianconi rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2187656e7052SJohn Crispin
2188e10a35abSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2189e10a35abSDaniel Golle likely(dma_addr != DMA_MAPPING_ERROR))
21902d75891eSDaniel Golle rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
21912d75891eSDaniel Golle
2192656e7052SJohn Crispin ring->calc_idx = idx;
2193635372adSSean Wang done++;
2194635372adSSean Wang }
219541156ceaSSean Wang
2196ee406810SNelson Chang rx_done:
219741156ceaSSean Wang if (done) {
2198656e7052SJohn Crispin /* make sure that all changes to the dma ring are flushed before
2199656e7052SJohn Crispin * we continue
2200656e7052SJohn Crispin */
2201656e7052SJohn Crispin wmb();
2202ee406810SNelson Chang mtk_update_rx_cpu_idx(eth);
220341156ceaSSean Wang }
2204656e7052SJohn Crispin
2205e9229ffdSFelix Fietkau eth->rx_packets += done;
2206e9229ffdSFelix Fietkau eth->rx_bytes += bytes;
2207e9229ffdSFelix Fietkau dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2208e9229ffdSFelix Fietkau &dim_sample);
2209e9229ffdSFelix Fietkau net_dim(&eth->rx_dim, dim_sample);
2210e9229ffdSFelix Fietkau
22117c26c20dSLorenzo Bianconi if (xdp_flush)
22127c26c20dSLorenzo Bianconi xdp_do_flush_map();
22137c26c20dSLorenzo Bianconi
2214656e7052SJohn Crispin return done;
2215656e7052SJohn Crispin }
2216656e7052SJohn Crispin
2217f63959c7SFelix Fietkau struct mtk_poll_state {
2218f63959c7SFelix Fietkau struct netdev_queue *txq;
2219f63959c7SFelix Fietkau unsigned int total;
2220f63959c7SFelix Fietkau unsigned int done;
2221f63959c7SFelix Fietkau unsigned int bytes;
2222f63959c7SFelix Fietkau };
2223f63959c7SFelix Fietkau
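/* Account a completed TX skb against its netdev queue. Per-queue
 * completions are batched in @state and only flushed to BQL via
 * netdev_tx_completed_queue() when the queue changes, so the common
 * case of many packets on one queue needs a single update.
 */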
2224f63959c7SFelix Fietkau static void
2225f63959c7SFelix Fietkau mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2226f63959c7SFelix Fietkau struct sk_buff *skb)
2227f63959c7SFelix Fietkau {
2228f63959c7SFelix Fietkau struct netdev_queue *txq;
2229f63959c7SFelix Fietkau struct net_device *dev;
2230f63959c7SFelix Fietkau unsigned int bytes = skb->len;
2231f63959c7SFelix Fietkau
2232f63959c7SFelix Fietkau state->total++;
2233f63959c7SFelix Fietkau eth->tx_packets++;
2234f63959c7SFelix Fietkau eth->tx_bytes += bytes;
2235f63959c7SFelix Fietkau
2236f63959c7SFelix Fietkau dev = eth->netdev[mac];
2237f63959c7SFelix Fietkau if (!dev)
2238f63959c7SFelix Fietkau return;
2239f63959c7SFelix Fietkau
2240f63959c7SFelix Fietkau txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2241f63959c7SFelix Fietkau if (state->txq == txq) {
2242f63959c7SFelix Fietkau state->done++;
2243f63959c7SFelix Fietkau state->bytes += bytes;
2244f63959c7SFelix Fietkau return;
2245f63959c7SFelix Fietkau }
2246f63959c7SFelix Fietkau
2247f63959c7SFelix Fietkau if (state->txq)
2248f63959c7SFelix Fietkau netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2249f63959c7SFelix Fietkau
2250f63959c7SFelix Fietkau state->txq = txq;
2251f63959c7SFelix Fietkau state->done = 1;
2252f63959c7SFelix Fietkau state->bytes = bytes;
2253f63959c7SFelix Fietkau }
2254f63959c7SFelix Fietkau
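/* Reclaim QDMA TX descriptors by walking the linked descriptor list
 * from the last CPU-owned position (ring->last_free_ptr) up to the
 * hardware release pointer (qdma.drx_ptr), unmapping the buffers and
 * returning descriptors to the free pool. Returns the unused part of
 * @budget.
 */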
2255296c9120SStefan Roese static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2256f63959c7SFelix Fietkau struct mtk_poll_state *state)
2257656e7052SJohn Crispin {
22588cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2259656e7052SJohn Crispin struct mtk_tx_ring *ring = &eth->tx_ring;
2260656e7052SJohn Crispin struct mtk_tx_buf *tx_buf;
2261853246dbSLorenzo Bianconi struct xdp_frame_bulk bq;
22625886d26fSLorenzo Bianconi struct mtk_tx_dma *desc;
2263656e7052SJohn Crispin u32 cpu, dma;
2264656e7052SJohn Crispin
22654e6bf609SFelix Fietkau cpu = ring->last_free_ptr;
22668cb42714SLorenzo Bianconi dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2267656e7052SJohn Crispin
2268656e7052SJohn Crispin desc = mtk_qdma_phys_to_virt(ring, cpu);
2269853246dbSLorenzo Bianconi xdp_frame_bulk_init(&bq);
2270656e7052SJohn Crispin
2271656e7052SJohn Crispin while ((cpu != dma) && budget) {
2272656e7052SJohn Crispin u32 next_cpu = desc->txd2;
2273656e7052SJohn Crispin
2274656e7052SJohn Crispin desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2275656e7052SJohn Crispin if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2276656e7052SJohn Crispin break;
2277656e7052SJohn Crispin
2278c4fd06c2SLorenzo Bianconi tx_buf = mtk_desc_to_tx_buf(ring, desc,
2279c4fd06c2SLorenzo Bianconi eth->soc->txrx.txd_size);
22805886d26fSLorenzo Bianconi if (!tx_buf->data)
2281656e7052SJohn Crispin break;
2282656e7052SJohn Crispin
2283155738a4SLorenzo Bianconi if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2284f63959c7SFelix Fietkau if (tx_buf->type == MTK_TYPE_SKB)
22851953f134SLorenzo Bianconi mtk_poll_tx_done(eth, state, tx_buf->mac_id,
22861953f134SLorenzo Bianconi tx_buf->data);
22875886d26fSLorenzo Bianconi
22885886d26fSLorenzo Bianconi budget--;
2289656e7052SJohn Crispin }
2290853246dbSLorenzo Bianconi mtk_tx_unmap(eth, tx_buf, &bq, true);
2291656e7052SJohn Crispin
2292656e7052SJohn Crispin ring->last_free = desc;
2293656e7052SJohn Crispin atomic_inc(&ring->free_count);
2294656e7052SJohn Crispin
2295656e7052SJohn Crispin cpu = next_cpu;
2296656e7052SJohn Crispin }
2297853246dbSLorenzo Bianconi xdp_flush_frame_bulk(&bq);
2298656e7052SJohn Crispin
22994e6bf609SFelix Fietkau ring->last_free_ptr = cpu;
23008cb42714SLorenzo Bianconi mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2301656e7052SJohn Crispin
2302296c9120SStefan Roese return budget;
2303296c9120SStefan Roese }
2304296c9120SStefan Roese
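/* PDMA variant: the descriptors sit in a plain array, so completion is
 * tracked with ring->cpu_idx against the hardware DTX index register
 * (MT7628_TX_DTX_IDX0) instead of a linked list.
 */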
2305296c9120SStefan Roese static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2306f63959c7SFelix Fietkau struct mtk_poll_state *state)
2307296c9120SStefan Roese {
2308296c9120SStefan Roese struct mtk_tx_ring *ring = &eth->tx_ring;
2309296c9120SStefan Roese struct mtk_tx_buf *tx_buf;
2310853246dbSLorenzo Bianconi struct xdp_frame_bulk bq;
23115886d26fSLorenzo Bianconi struct mtk_tx_dma *desc;
2312296c9120SStefan Roese u32 cpu, dma;
2313296c9120SStefan Roese
2314296c9120SStefan Roese cpu = ring->cpu_idx;
2315296c9120SStefan Roese dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2316853246dbSLorenzo Bianconi xdp_frame_bulk_init(&bq);
2317296c9120SStefan Roese
2318296c9120SStefan Roese while ((cpu != dma) && budget) {
2319296c9120SStefan Roese tx_buf = &ring->buf[cpu];
23205886d26fSLorenzo Bianconi if (!tx_buf->data)
2321296c9120SStefan Roese break;
2322296c9120SStefan Roese
2323155738a4SLorenzo Bianconi if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2324f63959c7SFelix Fietkau if (tx_buf->type == MTK_TYPE_SKB)
2325f63959c7SFelix Fietkau mtk_poll_tx_done(eth, state, 0, tx_buf->data);
23265886d26fSLorenzo Bianconi budget--;
2327296c9120SStefan Roese }
2328853246dbSLorenzo Bianconi mtk_tx_unmap(eth, tx_buf, &bq, true);
2329296c9120SStefan Roese
23307173eca8SLorenzo Bianconi desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2331296c9120SStefan Roese ring->last_free = desc;
2332296c9120SStefan Roese atomic_inc(&ring->free_count);
2333296c9120SStefan Roese
2334296c9120SStefan Roese cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2335296c9120SStefan Roese }
2336853246dbSLorenzo Bianconi xdp_flush_frame_bulk(&bq);
2337296c9120SStefan Roese
2338296c9120SStefan Roese ring->cpu_idx = cpu;
2339296c9120SStefan Roese
2340296c9120SStefan Roese return budget;
2341296c9120SStefan Roese }
2342296c9120SStefan Roese
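/* Top-level TX completion handler called from NAPI: picks the QDMA or
 * PDMA reclaim path, flushes the last pending BQL batch, feeds the DIM
 * (dynamic interrupt moderation) sample and wakes the queues once
 * enough descriptors are free again.
 */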
2343296c9120SStefan Roese static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2344296c9120SStefan Roese {
2345296c9120SStefan Roese struct mtk_tx_ring *ring = &eth->tx_ring;
2346e9229ffdSFelix Fietkau struct dim_sample dim_sample = {};
2347f63959c7SFelix Fietkau struct mtk_poll_state state = {};
2348296c9120SStefan Roese
2349296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2350f63959c7SFelix Fietkau budget = mtk_poll_tx_qdma(eth, budget, &state);
2351296c9120SStefan Roese else
2352f63959c7SFelix Fietkau budget = mtk_poll_tx_pdma(eth, budget, &state);
2353296c9120SStefan Roese
2354f63959c7SFelix Fietkau if (state.txq)
2355f63959c7SFelix Fietkau netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2356656e7052SJohn Crispin
2357e9229ffdSFelix Fietkau dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2358e9229ffdSFelix Fietkau &dim_sample);
2359e9229ffdSFelix Fietkau net_dim(&eth->tx_dim, dim_sample);
2360e9229ffdSFelix Fietkau
2361ad3cba98SJohn Crispin if (mtk_queue_stopped(eth) &&
2362ad3cba98SJohn Crispin (atomic_read(&ring->free_count) > ring->thresh))
236313c822f6SJohn Crispin mtk_wake_queue(eth);
2364656e7052SJohn Crispin
2365f63959c7SFelix Fietkau return state.total;
2366656e7052SJohn Crispin }
2367656e7052SJohn Crispin
236880673029SJohn Crispin static void mtk_handle_status_irq(struct mtk_eth *eth)
2369656e7052SJohn Crispin {
237080673029SJohn Crispin u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2371656e7052SJohn Crispin
2372eece71e8SJohn Crispin if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2373656e7052SJohn Crispin mtk_stats_update(eth);
2374eece71e8SJohn Crispin mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2375eece71e8SJohn Crispin MTK_INT_STATUS2);
2376656e7052SJohn Crispin }
2377656e7052SJohn Crispin }
2378656e7052SJohn Crispin
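/* TX NAPI poll: ack the TX done interrupt, reclaim completed
 * descriptors, and only re-enable the interrupt once less than @budget
 * work was done and no further completions are pending.
 */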
237980673029SJohn Crispin static int mtk_napi_tx(struct napi_struct *napi, int budget)
238080673029SJohn Crispin {
238180673029SJohn Crispin struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
23828cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
238380673029SJohn Crispin int tx_done = 0;
238480673029SJohn Crispin
2385296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
238680673029SJohn Crispin mtk_handle_status_irq(eth);
23878cb42714SLorenzo Bianconi mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
238880673029SJohn Crispin tx_done = mtk_poll_tx(eth, budget);
238980673029SJohn Crispin
239080673029SJohn Crispin if (unlikely(netif_msg_intr(eth))) {
239180673029SJohn Crispin dev_info(eth->dev,
2392db2c7b35SIlya Lipnitskiy "done tx %d, intr 0x%08x/0x%x\n", tx_done,
23938cb42714SLorenzo Bianconi mtk_r32(eth, reg_map->tx_irq_status),
23948cb42714SLorenzo Bianconi mtk_r32(eth, reg_map->tx_irq_mask));
239580673029SJohn Crispin }
239680673029SJohn Crispin
239780673029SJohn Crispin if (tx_done == budget)
2398656e7052SJohn Crispin return budget;
2399656e7052SJohn Crispin
24008cb42714SLorenzo Bianconi if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2401656e7052SJohn Crispin return budget;
2402656e7052SJohn Crispin
2403db2c7b35SIlya Lipnitskiy if (napi_complete_done(napi, tx_done))
24045cce0322SJohn Crispin mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
240580673029SJohn Crispin
240680673029SJohn Crispin return tx_done;
240780673029SJohn Crispin }
240880673029SJohn Crispin
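/* RX NAPI poll: ack the RX done interrupt, then keep polling until
 * either the budget is exhausted or the hardware no longer reports
 * pending RX done status, so packets arriving while a batch is being
 * processed are not missed.
 */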
240980673029SJohn Crispin static int mtk_napi_rx(struct napi_struct *napi, int budget)
241080673029SJohn Crispin {
241180673029SJohn Crispin struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
24128cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2413db2c7b35SIlya Lipnitskiy int rx_done_total = 0;
241480673029SJohn Crispin
241580673029SJohn Crispin mtk_handle_status_irq(eth);
241641156ceaSSean Wang
2417db2c7b35SIlya Lipnitskiy do {
2418db2c7b35SIlya Lipnitskiy int rx_done;
2419db2c7b35SIlya Lipnitskiy
2420160d3a9bSLorenzo Bianconi mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2421160d3a9bSLorenzo Bianconi reg_map->pdma.irq_status);
2422db2c7b35SIlya Lipnitskiy rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2423db2c7b35SIlya Lipnitskiy rx_done_total += rx_done;
242480673029SJohn Crispin
242580673029SJohn Crispin if (unlikely(netif_msg_intr(eth))) {
242680673029SJohn Crispin dev_info(eth->dev,
2427db2c7b35SIlya Lipnitskiy "done rx %d, intr 0x%08x/0x%x\n", rx_done,
24288cb42714SLorenzo Bianconi mtk_r32(eth, reg_map->pdma.irq_status),
24298cb42714SLorenzo Bianconi mtk_r32(eth, reg_map->pdma.irq_mask));
243080673029SJohn Crispin }
2431db2c7b35SIlya Lipnitskiy
2432db2c7b35SIlya Lipnitskiy if (rx_done_total == budget)
243380673029SJohn Crispin return budget;
243480673029SJohn Crispin
2435160d3a9bSLorenzo Bianconi } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2436160d3a9bSLorenzo Bianconi eth->soc->txrx.rx_irq_done_mask);
243716769a89SFelix Fietkau
2438db2c7b35SIlya Lipnitskiy if (napi_complete_done(napi, rx_done_total))
2439160d3a9bSLorenzo Bianconi mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2440656e7052SJohn Crispin
2441db2c7b35SIlya Lipnitskiy return rx_done_total;
2442656e7052SJohn Crispin }
2443656e7052SJohn Crispin
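/* Allocate and initialize the TX descriptor ring. QDMA capable SoCs use
 * a linked list of MTK_QDMA_RING_SIZE descriptors, optionally placed in
 * SoC SRAM when the MTK_SRAM capability is set; PDMA-only SoCs
 * additionally get a shadow ring in ring->dma_pdma holding the
 * descriptors the hardware actually uses.
 */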
2444656e7052SJohn Crispin static int mtk_tx_alloc(struct mtk_eth *eth)
2445656e7052SJohn Crispin {
24460e05744bSLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
2447656e7052SJohn Crispin struct mtk_tx_ring *ring = &eth->tx_ring;
24480e05744bSLorenzo Bianconi int i, sz = soc->txrx.txd_size;
2449160d3a9bSLorenzo Bianconi struct mtk_tx_dma_v2 *txd;
2450c30e0b9bSFelix Fietkau int ring_size;
2451f63959c7SFelix Fietkau u32 ofs, val;
2452656e7052SJohn Crispin
2453c30e0b9bSFelix Fietkau if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2454c30e0b9bSFelix Fietkau ring_size = MTK_QDMA_RING_SIZE;
2455c30e0b9bSFelix Fietkau else
2456c30e0b9bSFelix Fietkau ring_size = MTK_DMA_SIZE;
2457c30e0b9bSFelix Fietkau
2458c30e0b9bSFelix Fietkau ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2459656e7052SJohn Crispin GFP_KERNEL);
2460656e7052SJohn Crispin if (!ring->buf)
2461656e7052SJohn Crispin goto no_tx_mem;
2462656e7052SJohn Crispin
2463ebb1e4f9SDaniel Golle if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2464ebb1e4f9SDaniel Golle ring->dma = eth->sram_base + ring_size * sz;
2465ebb1e4f9SDaniel Golle ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
2466ebb1e4f9SDaniel Golle } else {
2467c30e0b9bSFelix Fietkau ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
246862dfb4ccSLorenzo Bianconi &ring->phys, GFP_KERNEL);
2469ebb1e4f9SDaniel Golle }
2470ebb1e4f9SDaniel Golle
2471656e7052SJohn Crispin if (!ring->dma)
2472656e7052SJohn Crispin goto no_tx_mem;
2473656e7052SJohn Crispin
2474c30e0b9bSFelix Fietkau for (i = 0; i < ring_size; i++) {
2475c30e0b9bSFelix Fietkau int next = (i + 1) % ring_size;
2476656e7052SJohn Crispin u32 next_ptr = ring->phys + next * sz;
2477656e7052SJohn Crispin
24787173eca8SLorenzo Bianconi txd = ring->dma + i * sz;
24790e05744bSLorenzo Bianconi txd->txd2 = next_ptr;
24800e05744bSLorenzo Bianconi txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
24810e05744bSLorenzo Bianconi txd->txd4 = 0;
2482a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
2483160d3a9bSLorenzo Bianconi txd->txd5 = 0;
2484160d3a9bSLorenzo Bianconi txd->txd6 = 0;
2485160d3a9bSLorenzo Bianconi txd->txd7 = 0;
2486160d3a9bSLorenzo Bianconi txd->txd8 = 0;
2487160d3a9bSLorenzo Bianconi }
2488656e7052SJohn Crispin }
2489656e7052SJohn Crispin
2490296c9120SStefan Roese /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2491296c9120SStefan Roese * only as the framework. The real HW descriptors are the PDMA
2492296c9120SStefan Roese * descriptors in ring->dma_pdma.
2493296c9120SStefan Roese */
2494160d3a9bSLorenzo Bianconi if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2495c30e0b9bSFelix Fietkau ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
249662dfb4ccSLorenzo Bianconi &ring->phys_pdma, GFP_KERNEL);
2497296c9120SStefan Roese if (!ring->dma_pdma)
2498296c9120SStefan Roese goto no_tx_mem;
2499296c9120SStefan Roese
2500c30e0b9bSFelix Fietkau for (i = 0; i < ring_size; i++) {
2501296c9120SStefan Roese ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2502296c9120SStefan Roese ring->dma_pdma[i].txd4 = 0;
2503296c9120SStefan Roese }
2504296c9120SStefan Roese }
2505296c9120SStefan Roese
2506c30e0b9bSFelix Fietkau ring->dma_size = ring_size;
2507c30e0b9bSFelix Fietkau atomic_set(&ring->free_count, ring_size - 2);
25087173eca8SLorenzo Bianconi ring->next_free = ring->dma;
25090e05744bSLorenzo Bianconi ring->last_free = (void *)txd;
2510c30e0b9bSFelix Fietkau ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
251104698cccSJohn Crispin ring->thresh = MAX_SKB_FRAGS;
2512656e7052SJohn Crispin
2513656e7052SJohn Crispin /* make sure that all changes to the dma ring are flushed before we
2514656e7052SJohn Crispin * continue
2515656e7052SJohn Crispin */
2516656e7052SJohn Crispin wmb();
2517656e7052SJohn Crispin
25188cb42714SLorenzo Bianconi if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
25198cb42714SLorenzo Bianconi mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
25208cb42714SLorenzo Bianconi mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2521656e7052SJohn Crispin mtk_w32(eth,
2522c30e0b9bSFelix Fietkau ring->phys + ((ring_size - 1) * sz),
25238cb42714SLorenzo Bianconi soc->reg_map->qdma.crx_ptr);
25248cb42714SLorenzo Bianconi mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2525f63959c7SFelix Fietkau
2526f63959c7SFelix Fietkau for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2527f63959c7SFelix Fietkau val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2528f63959c7SFelix Fietkau mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2529f63959c7SFelix Fietkau
2530f63959c7SFelix Fietkau val = MTK_QTX_SCH_MIN_RATE_EN |
2531f63959c7SFelix Fietkau /* minimum: 10 Mbps */
2532f63959c7SFelix Fietkau FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2533f63959c7SFelix Fietkau FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2534f63959c7SFelix Fietkau MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2535a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v1(eth))
2536f63959c7SFelix Fietkau val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2537f63959c7SFelix Fietkau mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2538f63959c7SFelix Fietkau ofs += MTK_QTX_OFFSET;
2539f63959c7SFelix Fietkau }
2540f63959c7SFelix Fietkau val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2541f63959c7SFelix Fietkau mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2542a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth))
2543f63959c7SFelix Fietkau mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2544296c9120SStefan Roese } else {
2545296c9120SStefan Roese mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2546c30e0b9bSFelix Fietkau mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2547296c9120SStefan Roese mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
25488cb42714SLorenzo Bianconi mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2549296c9120SStefan Roese }
2550656e7052SJohn Crispin
2551656e7052SJohn Crispin return 0;
2552656e7052SJohn Crispin
2553656e7052SJohn Crispin no_tx_mem:
2554656e7052SJohn Crispin return -ENOMEM;
2555656e7052SJohn Crispin }
2556656e7052SJohn Crispin
2557656e7052SJohn Crispin static void mtk_tx_clean(struct mtk_eth *eth)
2558656e7052SJohn Crispin {
25590e05744bSLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
2560656e7052SJohn Crispin struct mtk_tx_ring *ring = &eth->tx_ring;
2561656e7052SJohn Crispin int i;
2562656e7052SJohn Crispin
2563656e7052SJohn Crispin if (ring->buf) {
2564c30e0b9bSFelix Fietkau for (i = 0; i < ring->dma_size; i++)
2565853246dbSLorenzo Bianconi mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2566656e7052SJohn Crispin kfree(ring->buf);
2567656e7052SJohn Crispin ring->buf = NULL;
2568656e7052SJohn Crispin }
2569ebb1e4f9SDaniel Golle if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2570d776a57eSFelix Fietkau dma_free_coherent(eth->dma_dev,
2571c30e0b9bSFelix Fietkau ring->dma_size * soc->txrx.txd_size,
25720e05744bSLorenzo Bianconi ring->dma, ring->phys);
2573656e7052SJohn Crispin ring->dma = NULL;
2574656e7052SJohn Crispin }
2575296c9120SStefan Roese
2576296c9120SStefan Roese if (ring->dma_pdma) {
2577d776a57eSFelix Fietkau dma_free_coherent(eth->dma_dev,
2578c30e0b9bSFelix Fietkau ring->dma_size * soc->txrx.txd_size,
25790e05744bSLorenzo Bianconi ring->dma_pdma, ring->phys_pdma);
2580296c9120SStefan Roese ring->dma_pdma = NULL;
2581296c9120SStefan Roese }
2582656e7052SJohn Crispin }
2583656e7052SJohn Crispin
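/* Allocate one RX ring. Buffers come from a page_pool when page pool
 * mode is enabled, otherwise from netdev frags (with a larger
 * allocation for HW LRO rings). On SRAM capable SoCs the normal RX
 * rings are carved out of the same SRAM region right after the TX ring
 * instead of coherent DMA memory.
 */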
2584ee406810SNelson Chang static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2585656e7052SJohn Crispin {
25868cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
25876427dc1dSJohn Crispin struct mtk_rx_ring *ring;
2588ebb1e4f9SDaniel Golle int rx_data_len, rx_dma_size, tx_ring_size;
2589656e7052SJohn Crispin int i;
25906427dc1dSJohn Crispin
2591ebb1e4f9SDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2592ebb1e4f9SDaniel Golle tx_ring_size = MTK_QDMA_RING_SIZE;
2593ebb1e4f9SDaniel Golle else
2594ebb1e4f9SDaniel Golle tx_ring_size = MTK_DMA_SIZE;
2595ebb1e4f9SDaniel Golle
25966427dc1dSJohn Crispin if (rx_flag == MTK_RX_FLAGS_QDMA) {
25976427dc1dSJohn Crispin if (ring_no)
25986427dc1dSJohn Crispin return -EINVAL;
25996427dc1dSJohn Crispin ring = &eth->rx_ring_qdma;
26006427dc1dSJohn Crispin } else {
26016427dc1dSJohn Crispin ring = &eth->rx_ring[ring_no];
26026427dc1dSJohn Crispin }
2603656e7052SJohn Crispin
2604ee406810SNelson Chang if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2605ee406810SNelson Chang rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2606ee406810SNelson Chang rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2607ee406810SNelson Chang } else {
2608ee406810SNelson Chang rx_data_len = ETH_DATA_LEN;
2609ee406810SNelson Chang rx_dma_size = MTK_DMA_SIZE;
2610ee406810SNelson Chang }
2611ee406810SNelson Chang
2612ee406810SNelson Chang ring->frag_size = mtk_max_frag_size(rx_data_len);
2613656e7052SJohn Crispin ring->buf_size = mtk_max_buf_size(ring->frag_size);
2614ee406810SNelson Chang ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2615656e7052SJohn Crispin GFP_KERNEL);
2616656e7052SJohn Crispin if (!ring->data)
2617656e7052SJohn Crispin return -ENOMEM;
2618656e7052SJohn Crispin
26197c26c20dSLorenzo Bianconi if (mtk_page_pool_enabled(eth)) {
262023233e57SLorenzo Bianconi struct page_pool *pp;
262123233e57SLorenzo Bianconi
262223233e57SLorenzo Bianconi pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
262323233e57SLorenzo Bianconi rx_dma_size);
262423233e57SLorenzo Bianconi if (IS_ERR(pp))
262523233e57SLorenzo Bianconi return PTR_ERR(pp);
262623233e57SLorenzo Bianconi
262723233e57SLorenzo Bianconi ring->page_pool = pp;
2628656e7052SJohn Crispin }
2629656e7052SJohn Crispin
2630ebb1e4f9SDaniel Golle if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2631ebb1e4f9SDaniel Golle rx_flag != MTK_RX_FLAGS_NORMAL) {
2632d776a57eSFelix Fietkau ring->dma = dma_alloc_coherent(eth->dma_dev,
2633670ff7daSLorenzo Bianconi rx_dma_size * eth->soc->txrx.rxd_size,
263462dfb4ccSLorenzo Bianconi &ring->phys, GFP_KERNEL);
2635ebb1e4f9SDaniel Golle } else {
2636ebb1e4f9SDaniel Golle struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2637ebb1e4f9SDaniel Golle
2638ebb1e4f9SDaniel Golle ring->dma = tx_ring->dma + tx_ring_size *
2639ebb1e4f9SDaniel Golle eth->soc->txrx.txd_size * (ring_no + 1);
2640ebb1e4f9SDaniel Golle ring->phys = tx_ring->phys + tx_ring_size *
2641ebb1e4f9SDaniel Golle eth->soc->txrx.txd_size * (ring_no + 1);
2642ebb1e4f9SDaniel Golle }
2643ebb1e4f9SDaniel Golle
2644656e7052SJohn Crispin if (!ring->dma)
2645656e7052SJohn Crispin return -ENOMEM;
2646656e7052SJohn Crispin
2647ee406810SNelson Chang for (i = 0; i < rx_dma_size; i++) {
2648160d3a9bSLorenzo Bianconi struct mtk_rx_dma_v2 *rxd;
264923233e57SLorenzo Bianconi dma_addr_t dma_addr;
265023233e57SLorenzo Bianconi void *data;
265172e27d37SLorenzo Bianconi
26527173eca8SLorenzo Bianconi rxd = ring->dma + i * eth->soc->txrx.rxd_size;
265323233e57SLorenzo Bianconi if (ring->page_pool) {
265423233e57SLorenzo Bianconi data = mtk_page_pool_get_buff(ring->page_pool,
265523233e57SLorenzo Bianconi &dma_addr, GFP_KERNEL);
265623233e57SLorenzo Bianconi if (!data)
265723233e57SLorenzo Bianconi return -ENOMEM;
265823233e57SLorenzo Bianconi } else {
265923233e57SLorenzo Bianconi if (ring->frag_size <= PAGE_SIZE)
266023233e57SLorenzo Bianconi data = netdev_alloc_frag(ring->frag_size);
266123233e57SLorenzo Bianconi else
266223233e57SLorenzo Bianconi data = mtk_max_lro_buf_alloc(GFP_KERNEL);
266323233e57SLorenzo Bianconi
266423233e57SLorenzo Bianconi if (!data)
266523233e57SLorenzo Bianconi return -ENOMEM;
266623233e57SLorenzo Bianconi
266723233e57SLorenzo Bianconi dma_addr = dma_map_single(eth->dma_dev,
266823233e57SLorenzo Bianconi data + NET_SKB_PAD + eth->ip_align,
266923233e57SLorenzo Bianconi ring->buf_size, DMA_FROM_DEVICE);
267023233e57SLorenzo Bianconi if (unlikely(dma_mapping_error(eth->dma_dev,
26713213f808SZiyang Xuan dma_addr))) {
26723213f808SZiyang Xuan skb_free_frag(data);
267323233e57SLorenzo Bianconi return -ENOMEM;
267423233e57SLorenzo Bianconi }
26753213f808SZiyang Xuan }
267672e27d37SLorenzo Bianconi rxd->rxd1 = (unsigned int)dma_addr;
267723233e57SLorenzo Bianconi ring->data[i] = data;
2678656e7052SJohn Crispin
2679296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
268072e27d37SLorenzo Bianconi rxd->rxd2 = RX_DMA_LSO;
2681296c9120SStefan Roese else
2682160d3a9bSLorenzo Bianconi rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
268372e27d37SLorenzo Bianconi
26842d75891eSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
26852d75891eSDaniel Golle rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
26862d75891eSDaniel Golle
268772e27d37SLorenzo Bianconi rxd->rxd3 = 0;
268872e27d37SLorenzo Bianconi rxd->rxd4 = 0;
2689a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
2690160d3a9bSLorenzo Bianconi rxd->rxd5 = 0;
2691160d3a9bSLorenzo Bianconi rxd->rxd6 = 0;
2692160d3a9bSLorenzo Bianconi rxd->rxd7 = 0;
2693160d3a9bSLorenzo Bianconi rxd->rxd8 = 0;
2694160d3a9bSLorenzo Bianconi }
2695656e7052SJohn Crispin }
269623233e57SLorenzo Bianconi
2697ee406810SNelson Chang ring->dma_size = rx_dma_size;
2698ee406810SNelson Chang ring->calc_idx_update = false;
2699ee406810SNelson Chang ring->calc_idx = rx_dma_size - 1;
2700160d3a9bSLorenzo Bianconi if (rx_flag == MTK_RX_FLAGS_QDMA)
2701160d3a9bSLorenzo Bianconi ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2702160d3a9bSLorenzo Bianconi ring_no * MTK_QRX_OFFSET;
2703160d3a9bSLorenzo Bianconi else
2704160d3a9bSLorenzo Bianconi ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2705160d3a9bSLorenzo Bianconi ring_no * MTK_QRX_OFFSET;
2706656e7052SJohn Crispin /* make sure that all changes to the dma ring are flushed before we
2707656e7052SJohn Crispin * continue
2708656e7052SJohn Crispin */
2709656e7052SJohn Crispin wmb();
2710656e7052SJohn Crispin
2711160d3a9bSLorenzo Bianconi if (rx_flag == MTK_RX_FLAGS_QDMA) {
27128cb42714SLorenzo Bianconi mtk_w32(eth, ring->phys,
2713160d3a9bSLorenzo Bianconi reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
27148cb42714SLorenzo Bianconi mtk_w32(eth, rx_dma_size,
2715160d3a9bSLorenzo Bianconi reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2716160d3a9bSLorenzo Bianconi mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2717160d3a9bSLorenzo Bianconi reg_map->qdma.rst_idx);
2718160d3a9bSLorenzo Bianconi } else {
2719160d3a9bSLorenzo Bianconi mtk_w32(eth, ring->phys,
2720160d3a9bSLorenzo Bianconi reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2721160d3a9bSLorenzo Bianconi mtk_w32(eth, rx_dma_size,
2722160d3a9bSLorenzo Bianconi reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2723160d3a9bSLorenzo Bianconi mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2724160d3a9bSLorenzo Bianconi reg_map->pdma.rst_idx);
2725160d3a9bSLorenzo Bianconi }
2726160d3a9bSLorenzo Bianconi mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2727656e7052SJohn Crispin
2728656e7052SJohn Crispin return 0;
2729656e7052SJohn Crispin }
2730656e7052SJohn Crispin
2731ebb1e4f9SDaniel Golle static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2732656e7052SJohn Crispin {
27332d75891eSDaniel Golle u64 addr64 = 0;
2734656e7052SJohn Crispin int i;
2735656e7052SJohn Crispin
2736656e7052SJohn Crispin if (ring->data && ring->dma) {
2737ee406810SNelson Chang for (i = 0; i < ring->dma_size; i++) {
273872e27d37SLorenzo Bianconi struct mtk_rx_dma *rxd;
273972e27d37SLorenzo Bianconi
2740656e7052SJohn Crispin if (!ring->data[i])
2741656e7052SJohn Crispin continue;
274272e27d37SLorenzo Bianconi
27437173eca8SLorenzo Bianconi rxd = ring->dma + i * eth->soc->txrx.rxd_size;
274472e27d37SLorenzo Bianconi if (!rxd->rxd1)
2745656e7052SJohn Crispin continue;
274672e27d37SLorenzo Bianconi
27472d75891eSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
27482d75891eSDaniel Golle addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
27492d75891eSDaniel Golle
27502d75891eSDaniel Golle dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
275172e27d37SLorenzo Bianconi ring->buf_size, DMA_FROM_DEVICE);
275223233e57SLorenzo Bianconi mtk_rx_put_buff(ring, ring->data[i], false);
2753656e7052SJohn Crispin }
2754656e7052SJohn Crispin kfree(ring->data);
2755656e7052SJohn Crispin ring->data = NULL;
2756656e7052SJohn Crispin }
2757656e7052SJohn Crispin
2758ebb1e4f9SDaniel Golle if (!in_sram && ring->dma) {
2759d776a57eSFelix Fietkau dma_free_coherent(eth->dma_dev,
2760670ff7daSLorenzo Bianconi ring->dma_size * eth->soc->txrx.rxd_size,
2761670ff7daSLorenzo Bianconi ring->dma, ring->phys);
2762656e7052SJohn Crispin ring->dma = NULL;
2763656e7052SJohn Crispin }
276423233e57SLorenzo Bianconi
276523233e57SLorenzo Bianconi if (ring->page_pool) {
276623233e57SLorenzo Bianconi if (xdp_rxq_info_is_reg(&ring->xdp_q))
276723233e57SLorenzo Bianconi xdp_rxq_info_unreg(&ring->xdp_q);
276823233e57SLorenzo Bianconi page_pool_destroy(ring->page_pool);
276923233e57SLorenzo Bianconi ring->page_pool = NULL;
277023233e57SLorenzo Bianconi }
2771656e7052SJohn Crispin }
2772656e7052SJohn Crispin
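/* Program the hardware LRO engine: put the LRO rings into auto-learn
 * mode, set the aggregation timers and limits, and finally enable LRO
 * through PDMA_LRO_CTRL_DW0.
 */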
2773ee406810SNelson Chang static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2774ee406810SNelson Chang {
2775ee406810SNelson Chang int i;
2776ee406810SNelson Chang u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2777ee406810SNelson Chang u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2778ee406810SNelson Chang
2779ee406810SNelson Chang /* set LRO rings to auto-learn modes */
2780ee406810SNelson Chang ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2781ee406810SNelson Chang
2782ee406810SNelson Chang /* validate LRO ring */
2783ee406810SNelson Chang ring_ctrl_dw2 |= MTK_RING_VLD;
2784ee406810SNelson Chang
2785ee406810SNelson Chang /* set AGE timer (unit: 20us) */
2786ee406810SNelson Chang ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2787ee406810SNelson Chang ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2788ee406810SNelson Chang
2789ee406810SNelson Chang /* set max AGG timer (unit: 20us) */
2790ee406810SNelson Chang ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2791ee406810SNelson Chang
2792ee406810SNelson Chang /* set max LRO AGG count */
2793ee406810SNelson Chang ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2794ee406810SNelson Chang ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2795ee406810SNelson Chang
2796ee406810SNelson Chang for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2797ee406810SNelson Chang mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2798ee406810SNelson Chang mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2799ee406810SNelson Chang mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2800ee406810SNelson Chang }
2801ee406810SNelson Chang
2802ee406810SNelson Chang /* IPv4 checksum update enable */
2803ee406810SNelson Chang lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2804ee406810SNelson Chang
2805ee406810SNelson Chang /* switch priority comparison to packet count mode */
2806ee406810SNelson Chang lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2807ee406810SNelson Chang
2808ee406810SNelson Chang /* bandwidth threshold setting */
2809ee406810SNelson Chang mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2810ee406810SNelson Chang
2811ee406810SNelson Chang /* auto-learn score delta setting */
2812ee406810SNelson Chang mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2813ee406810SNelson Chang
2814ee406810SNelson Chang /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2815ee406810SNelson Chang mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2816ee406810SNelson Chang MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2817ee406810SNelson Chang
2818ee406810SNelson Chang /* set HW LRO mode & the max aggregation count for rx packets */
2819ee406810SNelson Chang lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2820ee406810SNelson Chang
2821ee406810SNelson Chang /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2822ee406810SNelson Chang lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2823ee406810SNelson Chang
2824ee406810SNelson Chang /* enable HW LRO */
2825ee406810SNelson Chang lro_ctrl_dw0 |= MTK_LRO_EN;
2826ee406810SNelson Chang
2827ee406810SNelson Chang mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2828ee406810SNelson Chang mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2829ee406810SNelson Chang
2830ee406810SNelson Chang return 0;
2831ee406810SNelson Chang }
2832ee406810SNelson Chang
2833ee406810SNelson Chang static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2834ee406810SNelson Chang {
2835ee406810SNelson Chang int i;
2836ee406810SNelson Chang u32 val;
2837ee406810SNelson Chang
2838ee406810SNelson Chang /* relinquish lro rings, flush aggregated packets */
2839ee406810SNelson Chang mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2840ee406810SNelson Chang
2841ee406810SNelson Chang /* wait for relinquishments done */
2842ee406810SNelson Chang for (i = 0; i < 10; i++) {
2843ee406810SNelson Chang val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2844ee406810SNelson Chang if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2845ee406810SNelson Chang msleep(20);
2846ee406810SNelson Chang continue;
2847ee406810SNelson Chang }
2848ca3ba106SNelson Chang break;
2849ee406810SNelson Chang }
2850ee406810SNelson Chang
2851ee406810SNelson Chang /* invalidate lro rings */
2852ee406810SNelson Chang for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2853ee406810SNelson Chang mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2854ee406810SNelson Chang
2855ee406810SNelson Chang /* disable HW LRO */
2856ee406810SNelson Chang mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2857ee406810SNelson Chang }
2858ee406810SNelson Chang
28597aab747eSNelson Chang static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
28607aab747eSNelson Chang {
28617aab747eSNelson Chang u32 reg_val;
28627aab747eSNelson Chang
28637aab747eSNelson Chang reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
28647aab747eSNelson Chang
28657aab747eSNelson Chang /* invalidate the IP setting */
28667aab747eSNelson Chang mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
28677aab747eSNelson Chang
28687aab747eSNelson Chang mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
28697aab747eSNelson Chang
28707aab747eSNelson Chang /* validate the IP setting */
28717aab747eSNelson Chang mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
28727aab747eSNelson Chang }
28737aab747eSNelson Chang
28747aab747eSNelson Chang static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
28757aab747eSNelson Chang {
28767aab747eSNelson Chang u32 reg_val;
28777aab747eSNelson Chang
28787aab747eSNelson Chang reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
28797aab747eSNelson Chang
28807aab747eSNelson Chang /* invalidate the IP setting */
28817aab747eSNelson Chang mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
28827aab747eSNelson Chang
28837aab747eSNelson Chang mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
28847aab747eSNelson Chang }
28857aab747eSNelson Chang
28867aab747eSNelson Chang static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
28877aab747eSNelson Chang {
28887aab747eSNelson Chang int cnt = 0;
28897aab747eSNelson Chang int i;
28907aab747eSNelson Chang
28917aab747eSNelson Chang for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
28927aab747eSNelson Chang if (mac->hwlro_ip[i])
28937aab747eSNelson Chang cnt++;
28947aab747eSNelson Chang }
28957aab747eSNelson Chang
28967aab747eSNelson Chang return cnt;
28977aab747eSNelson Chang }
28987aab747eSNelson Chang
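/* ethtool rxnfc hooks: each MAC owns MTK_MAX_LRO_IP_CNT destination
 * IPv4 slots; adding a TCP_V4_FLOW rule programs the address into the
 * matching LRO ring so only flows towards that address are aggregated.
 */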
28997aab747eSNelson Chang static int mtk_hwlro_add_ipaddr(struct net_device *dev,
29007aab747eSNelson Chang struct ethtool_rxnfc *cmd)
29017aab747eSNelson Chang {
29027aab747eSNelson Chang struct ethtool_rx_flow_spec *fsp =
29037aab747eSNelson Chang (struct ethtool_rx_flow_spec *)&cmd->fs;
29047aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
29057aab747eSNelson Chang struct mtk_eth *eth = mac->hw;
29067aab747eSNelson Chang int hwlro_idx;
29077aab747eSNelson Chang
29087aab747eSNelson Chang if ((fsp->flow_type != TCP_V4_FLOW) ||
29097aab747eSNelson Chang (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
29107aab747eSNelson Chang (fsp->location > 1))
29117aab747eSNelson Chang return -EINVAL;
29127aab747eSNelson Chang
29137aab747eSNelson Chang mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
29147aab747eSNelson Chang hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
29157aab747eSNelson Chang
29167aab747eSNelson Chang mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
29177aab747eSNelson Chang
29187aab747eSNelson Chang mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
29197aab747eSNelson Chang
29207aab747eSNelson Chang return 0;
29217aab747eSNelson Chang }
29227aab747eSNelson Chang
29237aab747eSNelson Chang static int mtk_hwlro_del_ipaddr(struct net_device *dev,
29247aab747eSNelson Chang struct ethtool_rxnfc *cmd)
29257aab747eSNelson Chang {
29267aab747eSNelson Chang struct ethtool_rx_flow_spec *fsp =
29277aab747eSNelson Chang (struct ethtool_rx_flow_spec *)&cmd->fs;
29287aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
29297aab747eSNelson Chang struct mtk_eth *eth = mac->hw;
29307aab747eSNelson Chang int hwlro_idx;
29317aab747eSNelson Chang
29327aab747eSNelson Chang if (fsp->location > 1)
29337aab747eSNelson Chang return -EINVAL;
29347aab747eSNelson Chang
29357aab747eSNelson Chang mac->hwlro_ip[fsp->location] = 0;
29367aab747eSNelson Chang hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
29377aab747eSNelson Chang
29387aab747eSNelson Chang mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
29397aab747eSNelson Chang
29407aab747eSNelson Chang mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
29417aab747eSNelson Chang
29427aab747eSNelson Chang return 0;
29437aab747eSNelson Chang }
29447aab747eSNelson Chang
29457aab747eSNelson Chang static void mtk_hwlro_netdev_disable(struct net_device *dev)
29467aab747eSNelson Chang {
29477aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
29487aab747eSNelson Chang struct mtk_eth *eth = mac->hw;
29497aab747eSNelson Chang int i, hwlro_idx;
29507aab747eSNelson Chang
29517aab747eSNelson Chang for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
29527aab747eSNelson Chang mac->hwlro_ip[i] = 0;
29537aab747eSNelson Chang hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
29547aab747eSNelson Chang
29557aab747eSNelson Chang mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
29567aab747eSNelson Chang }
29577aab747eSNelson Chang
29587aab747eSNelson Chang mac->hwlro_ip_cnt = 0;
29597aab747eSNelson Chang }
29607aab747eSNelson Chang
29617aab747eSNelson Chang static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
29627aab747eSNelson Chang struct ethtool_rxnfc *cmd)
29637aab747eSNelson Chang {
29647aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
29657aab747eSNelson Chang struct ethtool_rx_flow_spec *fsp =
29667aab747eSNelson Chang (struct ethtool_rx_flow_spec *)&cmd->fs;
29677aab747eSNelson Chang
2968e7e7104eSDan Carpenter if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2969e7e7104eSDan Carpenter return -EINVAL;
2970e7e7104eSDan Carpenter
29717aab747eSNelson Chang /* only tcp dst ipv4 is meaningful, others are meaningless */
29727aab747eSNelson Chang fsp->flow_type = TCP_V4_FLOW;
29737aab747eSNelson Chang fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
29747aab747eSNelson Chang fsp->m_u.tcp_ip4_spec.ip4dst = 0;
29757aab747eSNelson Chang
29767aab747eSNelson Chang fsp->h_u.tcp_ip4_spec.ip4src = 0;
29777aab747eSNelson Chang fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
29787aab747eSNelson Chang fsp->h_u.tcp_ip4_spec.psrc = 0;
29797aab747eSNelson Chang fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
29807aab747eSNelson Chang fsp->h_u.tcp_ip4_spec.pdst = 0;
29817aab747eSNelson Chang fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
29827aab747eSNelson Chang fsp->h_u.tcp_ip4_spec.tos = 0;
29837aab747eSNelson Chang fsp->m_u.tcp_ip4_spec.tos = 0xff;
29847aab747eSNelson Chang
29857aab747eSNelson Chang return 0;
29867aab747eSNelson Chang }
29877aab747eSNelson Chang
29887aab747eSNelson Chang static int mtk_hwlro_get_fdir_all(struct net_device *dev,
29897aab747eSNelson Chang struct ethtool_rxnfc *cmd,
29907aab747eSNelson Chang u32 *rule_locs)
29917aab747eSNelson Chang {
29927aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
29937aab747eSNelson Chang int cnt = 0;
29947aab747eSNelson Chang int i;
29957aab747eSNelson Chang
29967aab747eSNelson Chang for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2997e4c79810SHangyu Hua if (cnt == cmd->rule_cnt)
2998e4c79810SHangyu Hua return -EMSGSIZE;
2999e4c79810SHangyu Hua
30007aab747eSNelson Chang if (mac->hwlro_ip[i]) {
30017aab747eSNelson Chang rule_locs[cnt] = i;
30027aab747eSNelson Chang cnt++;
30037aab747eSNelson Chang }
30047aab747eSNelson Chang }
30057aab747eSNelson Chang
30067aab747eSNelson Chang cmd->rule_cnt = cnt;
30077aab747eSNelson Chang
30087aab747eSNelson Chang return 0;
30097aab747eSNelson Chang }
30107aab747eSNelson Chang
30117aab747eSNelson Chang static netdev_features_t mtk_fix_features(struct net_device *dev,
30127aab747eSNelson Chang netdev_features_t features)
30137aab747eSNelson Chang {
30147aab747eSNelson Chang if (!(features & NETIF_F_LRO)) {
30157aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
30167aab747eSNelson Chang int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
30177aab747eSNelson Chang
30187aab747eSNelson Chang if (ip_cnt) {
30197aab747eSNelson Chang netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
30207aab747eSNelson Chang
30217aab747eSNelson Chang features |= NETIF_F_LRO;
30227aab747eSNelson Chang }
30237aab747eSNelson Chang }
30247aab747eSNelson Chang
30257aab747eSNelson Chang return features;
30267aab747eSNelson Chang }
30277aab747eSNelson Chang
30287aab747eSNelson Chang static int mtk_set_features(struct net_device *dev, netdev_features_t features)
30297aab747eSNelson Chang {
303008666cbbSFelix Fietkau netdev_features_t diff = dev->features ^ features;
30317aab747eSNelson Chang
303208666cbbSFelix Fietkau if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
30337aab747eSNelson Chang mtk_hwlro_netdev_disable(dev);
30347aab747eSNelson Chang
303508666cbbSFelix Fietkau return 0;
30367aab747eSNelson Chang }
30377aab747eSNelson Chang
3038656e7052SJohn Crispin /* wait for DMA to finish whatever it is doing before we start using it again */
3039656e7052SJohn Crispin static int mtk_dma_busy_wait(struct mtk_eth *eth)
3040656e7052SJohn Crispin {
30413bc8e0afSIlya Lipnitskiy unsigned int reg;
30423bc8e0afSIlya Lipnitskiy int ret;
30433bc8e0afSIlya Lipnitskiy u32 val;
3044656e7052SJohn Crispin
30453bc8e0afSIlya Lipnitskiy if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
30468cb42714SLorenzo Bianconi reg = eth->soc->reg_map->qdma.glo_cfg;
30473bc8e0afSIlya Lipnitskiy else
30488cb42714SLorenzo Bianconi reg = eth->soc->reg_map->pdma.glo_cfg;
3049296c9120SStefan Roese
30503bc8e0afSIlya Lipnitskiy ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
30513bc8e0afSIlya Lipnitskiy !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
30523bc8e0afSIlya Lipnitskiy 5, MTK_DMA_BUSY_TIMEOUT_US);
30533bc8e0afSIlya Lipnitskiy if (ret)
3054656e7052SJohn Crispin dev_err(eth->dev, "DMA init timeout\n");
30553bc8e0afSIlya Lipnitskiy
30563bc8e0afSIlya Lipnitskiy return ret;
3057656e7052SJohn Crispin }
3058656e7052SJohn Crispin
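/* Bring up all DMA rings: scratch memory for QDMA descriptor
 * reordering, the TX ring, the QDMA and normal RX rings and, if
 * supported, the HW LRO rings, then configure the QDMA random early
 * drop thresholds.
 */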
3059656e7052SJohn Crispin static int mtk_dma_init(struct mtk_eth *eth)
3060656e7052SJohn Crispin {
3061656e7052SJohn Crispin int err;
3062ee406810SNelson Chang u32 i;
3063656e7052SJohn Crispin
3064656e7052SJohn Crispin if (mtk_dma_busy_wait(eth))
3065656e7052SJohn Crispin return -EBUSY;
3066656e7052SJohn Crispin
3067296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3068656e7052SJohn Crispin /* QDMA needs scratch memory for internal reordering of the
3069656e7052SJohn Crispin * descriptors
3070656e7052SJohn Crispin */
3071656e7052SJohn Crispin err = mtk_init_fq_dma(eth);
3072656e7052SJohn Crispin if (err)
3073656e7052SJohn Crispin return err;
3074296c9120SStefan Roese }
3075656e7052SJohn Crispin
3076656e7052SJohn Crispin err = mtk_tx_alloc(eth);
3077656e7052SJohn Crispin if (err)
3078656e7052SJohn Crispin return err;
3079656e7052SJohn Crispin
3080296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
30816427dc1dSJohn Crispin err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
30826427dc1dSJohn Crispin if (err)
30836427dc1dSJohn Crispin return err;
3084296c9120SStefan Roese }
30856427dc1dSJohn Crispin
3086ee406810SNelson Chang err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3087656e7052SJohn Crispin if (err)
3088656e7052SJohn Crispin return err;
3089656e7052SJohn Crispin
3090ee406810SNelson Chang if (eth->hwlro) {
3091ee406810SNelson Chang for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3092ee406810SNelson Chang err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3093ee406810SNelson Chang if (err)
3094ee406810SNelson Chang return err;
3095ee406810SNelson Chang }
3096ee406810SNelson Chang err = mtk_hwlro_rx_init(eth);
3097ee406810SNelson Chang if (err)
3098ee406810SNelson Chang return err;
3099ee406810SNelson Chang }
3100ee406810SNelson Chang
3101296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3102296c9120SStefan Roese /* Enable random early drop and set drop threshold
3103296c9120SStefan Roese * automatically
3104296c9120SStefan Roese */
3105296c9120SStefan Roese mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
31068cb42714SLorenzo Bianconi FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
31078cb42714SLorenzo Bianconi mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3108296c9120SStefan Roese }
3109656e7052SJohn Crispin
3110656e7052SJohn Crispin return 0;
3111656e7052SJohn Crispin }
3112656e7052SJohn Crispin
3113656e7052SJohn Crispin static void mtk_dma_free(struct mtk_eth *eth)
3114656e7052SJohn Crispin {
3115eb067347SLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
3116656e7052SJohn Crispin int i;
3117656e7052SJohn Crispin
3118e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++)
3119656e7052SJohn Crispin if (eth->netdev[i])
3120656e7052SJohn Crispin netdev_reset_queue(eth->netdev[i]);
3121ebb1e4f9SDaniel Golle if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3122d776a57eSFelix Fietkau dma_free_coherent(eth->dma_dev,
3123c30e0b9bSFelix Fietkau MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3124eb067347SLorenzo Bianconi eth->scratch_ring, eth->phy_scratch_ring);
3125605e4fe4SJohn Crispin eth->scratch_ring = NULL;
3126605e4fe4SJohn Crispin eth->phy_scratch_ring = 0;
3127605e4fe4SJohn Crispin }
3128656e7052SJohn Crispin mtk_tx_clean(eth);
3129ebb1e4f9SDaniel Golle mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3130ebb1e4f9SDaniel Golle mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3131ee406810SNelson Chang
3132ee406810SNelson Chang if (eth->hwlro) {
3133ee406810SNelson Chang mtk_hwlro_rx_uninit(eth);
3134ee406810SNelson Chang for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3135ebb1e4f9SDaniel Golle mtk_rx_clean(eth, &eth->rx_ring[i], false);
3136ee406810SNelson Chang }
3137ee406810SNelson Chang
3138656e7052SJohn Crispin kfree(eth->scratch_head);
3139656e7052SJohn Crispin }
3140656e7052SJohn Crispin
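/* Check MTK_INT_STATUS2 for frame engine error conditions (scratch FQ
 * empty, RX FIFO under-/overflow, TSO failures) that warrant a full
 * hardware reset instead of a simple queue restart.
 */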
314106127504SLorenzo Bianconi static bool mtk_hw_reset_check(struct mtk_eth *eth)
314206127504SLorenzo Bianconi {
314306127504SLorenzo Bianconi u32 val = mtk_r32(eth, MTK_INT_STATUS2);
314406127504SLorenzo Bianconi
314506127504SLorenzo Bianconi return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
314606127504SLorenzo Bianconi (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
314706127504SLorenzo Bianconi (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
314806127504SLorenzo Bianconi }
314906127504SLorenzo Bianconi
31500290bd29SMichael S. Tsirkin static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3151656e7052SJohn Crispin {
3152656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
3153656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
3154656e7052SJohn Crispin
315506127504SLorenzo Bianconi if (test_bit(MTK_RESETTING, ð->state))
315606127504SLorenzo Bianconi return;
315706127504SLorenzo Bianconi
315806127504SLorenzo Bianconi if (!mtk_hw_reset_check(eth))
315906127504SLorenzo Bianconi return;
316006127504SLorenzo Bianconi
3161656e7052SJohn Crispin eth->netdev[mac->id]->stats.tx_errors++;
316206127504SLorenzo Bianconi netif_err(eth, tx_err, dev, "transmit timed out\n");
316306127504SLorenzo Bianconi
31647c78b4adSJohn Crispin schedule_work(&eth->pending_work);
3165656e7052SJohn Crispin }
3166656e7052SJohn Crispin
316780673029SJohn Crispin static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3168656e7052SJohn Crispin {
3169656e7052SJohn Crispin struct mtk_eth *eth = _eth;
3170656e7052SJohn Crispin
3171e9229ffdSFelix Fietkau eth->rx_events++;
317280673029SJohn Crispin if (likely(napi_schedule_prep(&eth->rx_napi))) {
3173160d3a9bSLorenzo Bianconi mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3174fcdfc462SChristian Marangi __napi_schedule(&eth->rx_napi);
3175656e7052SJohn Crispin }
317680673029SJohn Crispin
317780673029SJohn Crispin return IRQ_HANDLED;
317880673029SJohn Crispin }
317980673029SJohn Crispin
318080673029SJohn Crispin static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
318180673029SJohn Crispin {
318280673029SJohn Crispin struct mtk_eth *eth = _eth;
318380673029SJohn Crispin
3184e9229ffdSFelix Fietkau eth->tx_events++;
318580673029SJohn Crispin if (likely(napi_schedule_prep(&eth->tx_napi))) {
31865cce0322SJohn Crispin mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3187fcdfc462SChristian Marangi __napi_schedule(&eth->tx_napi);
318880673029SJohn Crispin }
3189656e7052SJohn Crispin
3190656e7052SJohn Crispin return IRQ_HANDLED;
3191656e7052SJohn Crispin }
3192656e7052SJohn Crispin
3193889bcbdeSBjørn Mork static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3194889bcbdeSBjørn Mork {
3195889bcbdeSBjørn Mork struct mtk_eth *eth = _eth;
31968cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3197889bcbdeSBjørn Mork
3198160d3a9bSLorenzo Bianconi if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3199160d3a9bSLorenzo Bianconi eth->soc->txrx.rx_irq_done_mask) {
3200160d3a9bSLorenzo Bianconi if (mtk_r32(eth, reg_map->pdma.irq_status) &
3201160d3a9bSLorenzo Bianconi eth->soc->txrx.rx_irq_done_mask)
3202889bcbdeSBjørn Mork mtk_handle_irq_rx(irq, _eth);
3203889bcbdeSBjørn Mork }
32048cb42714SLorenzo Bianconi if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
32058cb42714SLorenzo Bianconi if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3206889bcbdeSBjørn Mork mtk_handle_irq_tx(irq, _eth);
3207889bcbdeSBjørn Mork }
3208889bcbdeSBjørn Mork
3209889bcbdeSBjørn Mork return IRQ_HANDLED;
3210889bcbdeSBjørn Mork }
3211889bcbdeSBjørn Mork
3212656e7052SJohn Crispin #ifdef CONFIG_NET_POLL_CONTROLLER
3213656e7052SJohn Crispin static void mtk_poll_controller(struct net_device *dev)
3214656e7052SJohn Crispin {
3215656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
3216656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
3217656e7052SJohn Crispin
32185cce0322SJohn Crispin mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3219160d3a9bSLorenzo Bianconi mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
32208186f6e3SJohn Crispin mtk_handle_irq_rx(eth->irq[2], dev);
32215cce0322SJohn Crispin mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3222160d3a9bSLorenzo Bianconi mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3223656e7052SJohn Crispin }
3224656e7052SJohn Crispin #endif
3225656e7052SJohn Crispin
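/* Set up the DMA rings and enable the TX/RX engines. The global config
 * bits differ between QDMA capable SoCs (where NETSYS v2 and later
 * parts gain extra write-back and leaky bucket options) and PDMA-only
 * SoCs.
 */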
3226656e7052SJohn Crispin static int mtk_start_dma(struct mtk_eth *eth)
3227656e7052SJohn Crispin {
3228160d3a9bSLorenzo Bianconi u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
32298cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3230656e7052SJohn Crispin int err;
3231656e7052SJohn Crispin
3232656e7052SJohn Crispin err = mtk_dma_init(eth);
3233656e7052SJohn Crispin if (err) {
3234656e7052SJohn Crispin mtk_dma_free(eth);
3235656e7052SJohn Crispin return err;
3236656e7052SJohn Crispin }
3237656e7052SJohn Crispin
3238296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3239160d3a9bSLorenzo Bianconi val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3240160d3a9bSLorenzo Bianconi val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
324159555a8dSFelix Fietkau MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3242160d3a9bSLorenzo Bianconi MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3243160d3a9bSLorenzo Bianconi
3244a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth))
3245160d3a9bSLorenzo Bianconi val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3246160d3a9bSLorenzo Bianconi MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3247f63959c7SFelix Fietkau MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3248160d3a9bSLorenzo Bianconi else
3249160d3a9bSLorenzo Bianconi val |= MTK_RX_BT_32DWORDS;
3250160d3a9bSLorenzo Bianconi mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3251160d3a9bSLorenzo Bianconi
3252bacfd110SNelson Chang mtk_w32(eth,
3253880c2d4bSSean Wang MTK_RX_DMA_EN | rx_2b_offset |
3254bacfd110SNelson Chang MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
32558cb42714SLorenzo Bianconi reg_map->pdma.glo_cfg);
3256296c9120SStefan Roese } else {
3257296c9120SStefan Roese mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3258296c9120SStefan Roese MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
32598cb42714SLorenzo Bianconi reg_map->pdma.glo_cfg);
3260296c9120SStefan Roese }
3261bacfd110SNelson Chang
3262656e7052SJohn Crispin return 0;
3263656e7052SJohn Crispin }
3264656e7052SJohn Crispin
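/* Program the per-GDM forwarding setup: clear the destination field,
 * enable IP/TCP/UDP RX checksum offload, apply the requested forwarding
 * target (@config) plus the special tag for DSA masters, then reset and
 * re-enable the PSE.
 */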
32658d3f4a95SMarkLee static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
32668d3f4a95SMarkLee {
32678d3f4a95SMarkLee int i;
32688d3f4a95SMarkLee
32695ac9eda0SMarkLee if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
32705ac9eda0SMarkLee return;
32715ac9eda0SMarkLee
3272e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
3273e05fd627SLorenzo Bianconi u32 val;
3274e05fd627SLorenzo Bianconi
3275e05fd627SLorenzo Bianconi if (!eth->netdev[i])
3276e05fd627SLorenzo Bianconi continue;
3277e05fd627SLorenzo Bianconi
3278e05fd627SLorenzo Bianconi val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
32798d3f4a95SMarkLee
32808d3f4a95SMarkLee /* by default, set up the forward port to send frames to the PDMA */
32818d3f4a95SMarkLee val &= ~0xffff;
32828d3f4a95SMarkLee
32838d3f4a95SMarkLee /* Enable RX checksum */
32848d3f4a95SMarkLee val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
32858d3f4a95SMarkLee
32868d3f4a95SMarkLee val |= config;
32878d3f4a95SMarkLee
3288e05fd627SLorenzo Bianconi if (netdev_uses_dsa(eth->netdev[i]))
3289d5c53da2SFelix Fietkau val |= MTK_GDMA_SPECIAL_TAG;
3290d5c53da2SFelix Fietkau
32918d3f4a95SMarkLee mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
32928d3f4a95SMarkLee }
32938d3f4a95SMarkLee /* Reset and enable PSE */
32948d3f4a95SMarkLee mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
32958d3f4a95SMarkLee mtk_w32(eth, 0, MTK_RST_GL);
32968d3f4a95SMarkLee }
32978d3f4a95SMarkLee
32982d7605a7SFelix Fietkau
32992d7605a7SFelix Fietkau static bool mtk_uses_dsa(struct net_device *dev)
33002d7605a7SFelix Fietkau {
33012d7605a7SFelix Fietkau #if IS_ENABLED(CONFIG_NET_DSA)
33022d7605a7SFelix Fietkau return netdev_uses_dsa(dev) &&
33032d7605a7SFelix Fietkau dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
33042d7605a7SFelix Fietkau #else
33052d7605a7SFelix Fietkau return false;
33062d7605a7SFelix Fietkau #endif
33072d7605a7SFelix Fietkau }
33082d7605a7SFelix Fietkau
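/* Netdevice notifier: on a NETDEV_CHANGE event for a DSA user port stacked
 * on this MAC, reprogram the QDMA shaper of the queue mapped to that port
 * (dp->index + 3) to the negotiated link speed; shaping is disabled when
 * the MAC link itself is not faster than the port.
 */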
3309f63959c7SFelix Fietkau static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3310f63959c7SFelix Fietkau {
3311f63959c7SFelix Fietkau struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3312f63959c7SFelix Fietkau struct mtk_eth *eth = mac->hw;
3313f63959c7SFelix Fietkau struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3314f63959c7SFelix Fietkau struct ethtool_link_ksettings s;
3315f63959c7SFelix Fietkau struct net_device *ldev;
3316f63959c7SFelix Fietkau struct list_head *iter;
3317f63959c7SFelix Fietkau struct dsa_port *dp;
3318f63959c7SFelix Fietkau
3319f63959c7SFelix Fietkau if (event != NETDEV_CHANGE)
3320f63959c7SFelix Fietkau return NOTIFY_DONE;
3321f63959c7SFelix Fietkau
3322f63959c7SFelix Fietkau netdev_for_each_lower_dev(dev, ldev, iter) {
3323f63959c7SFelix Fietkau if (netdev_priv(ldev) == mac)
3324f63959c7SFelix Fietkau goto found;
3325f63959c7SFelix Fietkau }
3326f63959c7SFelix Fietkau
3327f63959c7SFelix Fietkau return NOTIFY_DONE;
3328f63959c7SFelix Fietkau
3329f63959c7SFelix Fietkau found:
3330f63959c7SFelix Fietkau if (!dsa_slave_dev_check(dev))
3331f63959c7SFelix Fietkau return NOTIFY_DONE;
3332f63959c7SFelix Fietkau
3333f63959c7SFelix Fietkau if (__ethtool_get_link_ksettings(dev, &s))
3334f63959c7SFelix Fietkau return NOTIFY_DONE;
3335f63959c7SFelix Fietkau
3336f63959c7SFelix Fietkau if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3337f63959c7SFelix Fietkau return NOTIFY_DONE;
3338f63959c7SFelix Fietkau
3339f63959c7SFelix Fietkau dp = dsa_port_from_netdev(dev);
3340f63959c7SFelix Fietkau if (dp->index >= MTK_QDMA_NUM_QUEUES)
3341f63959c7SFelix Fietkau return NOTIFY_DONE;
3342f63959c7SFelix Fietkau
3343e669ce46SFelix Fietkau if (mac->speed > 0 && mac->speed <= s.base.speed)
3344e669ce46SFelix Fietkau s.base.speed = 0;
3345e669ce46SFelix Fietkau
3346f63959c7SFelix Fietkau mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3347f63959c7SFelix Fietkau
3348f63959c7SFelix Fietkau return NOTIFY_DONE;
3349f63959c7SFelix Fietkau }
3350f63959c7SFelix Fietkau
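/* ndo_open: attach the PHY through phylink and, for the first user of the
 * shared DMA rings (tracked by dma_refcnt), start DMA, the PPEs, NAPI and
 * the TX/RX interrupts. On NETSYS v1, metadata dst entries for hardware
 * DSA untagging are allocated here, or the feature is disabled when a MAC
 * does not use DSA.
 */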
3351656e7052SJohn Crispin static int mtk_open(struct net_device *dev)
3352656e7052SJohn Crispin {
3353656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
3354656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
33552d7605a7SFelix Fietkau int i, err;
33562d7605a7SFelix Fietkau
3357b8fc9f30SRené van Dorst err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3358b8fc9f30SRené van Dorst if (err) {
3359b8fc9f30SRené van Dorst netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3360b8fc9f30SRené van Dorst err);
3361b8fc9f30SRené van Dorst return err;
3362b8fc9f30SRené van Dorst }
3363656e7052SJohn Crispin
3364656e7052SJohn Crispin /* we run 2 netdevs on the same dma ring so we only bring it up once */
3365c6d4e63eSElena Reshetova if (!refcount_read(ð->dma_refcnt)) {
3366329bce51SLorenzo Bianconi const struct mtk_soc_data *soc = eth->soc;
33674ff1a3fcSLorenzo Bianconi u32 gdm_config;
33684ff1a3fcSLorenzo Bianconi int i;
3369656e7052SJohn Crispin
3370ba37b7caSFelix Fietkau err = mtk_start_dma(eth);
3371f7007414SLiu Jian if (err) {
3372f7007414SLiu Jian phylink_disconnect_phy(mac->phylink);
3373656e7052SJohn Crispin return err;
3374f7007414SLiu Jian }
3375656e7052SJohn Crispin
33764ff1a3fcSLorenzo Bianconi for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
33774ff1a3fcSLorenzo Bianconi mtk_ppe_start(eth->ppe[i]);
3378ba37b7caSFelix Fietkau
33794ff1a3fcSLorenzo Bianconi gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
33804ff1a3fcSLorenzo Bianconi : MTK_GDMA_TO_PDMA;
3381ba37b7caSFelix Fietkau mtk_gdm_config(eth, gdm_config);
33825ac9eda0SMarkLee
338380673029SJohn Crispin napi_enable(ð->tx_napi);
3384656e7052SJohn Crispin napi_enable(ð->rx_napi);
33855cce0322SJohn Crispin mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3386329bce51SLorenzo Bianconi mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3387c6d4e63eSElena Reshetova refcount_set(ð->dma_refcnt, 1);
3388656e7052SJohn Crispin } else {
3389c6d4e63eSElena Reshetova refcount_inc(&eth->dma_refcnt);
3390c6d4e63eSElena Reshetova }
3391656e7052SJohn Crispin
3392b8fc9f30SRené van Dorst phylink_start(mac->phylink);
3393f63959c7SFelix Fietkau netif_tx_start_all_queues(dev);
3394f63959c7SFelix Fietkau
3395a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth))
3396c6d96df9SFelix Fietkau return 0;
3397c6d96df9SFelix Fietkau
3398c6d96df9SFelix Fietkau if (mtk_uses_dsa(dev) && !eth->prog) {
3399c6d96df9SFelix Fietkau for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3400c6d96df9SFelix Fietkau struct metadata_dst *md_dst = eth->dsa_meta[i];
3401c6d96df9SFelix Fietkau
3402c6d96df9SFelix Fietkau if (md_dst)
3403c6d96df9SFelix Fietkau continue;
3404c6d96df9SFelix Fietkau
3405c6d96df9SFelix Fietkau md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3406c6d96df9SFelix Fietkau GFP_KERNEL);
3407c6d96df9SFelix Fietkau if (!md_dst)
3408c6d96df9SFelix Fietkau return -ENOMEM;
3409c6d96df9SFelix Fietkau
3410c6d96df9SFelix Fietkau md_dst->u.port_info.port_id = i;
3411c6d96df9SFelix Fietkau eth->dsa_meta[i] = md_dst;
3412c6d96df9SFelix Fietkau }
3413c6d96df9SFelix Fietkau } else {
341404910d8cSArınç ÜNAL /* Hardware DSA untagging and VLAN RX offloading need to be
341504910d8cSArınç ÜNAL * disabled if at least one MAC does not use DSA.
3416c6d96df9SFelix Fietkau */
3417c6d96df9SFelix Fietkau u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3418c6d96df9SFelix Fietkau
3419c6d96df9SFelix Fietkau val &= ~MTK_CDMP_STAG_EN;
3420c6d96df9SFelix Fietkau mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3421c6d96df9SFelix Fietkau
3422c6d96df9SFelix Fietkau mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3423c6d96df9SFelix Fietkau }
3424c6d96df9SFelix Fietkau
3425656e7052SJohn Crispin return 0;
3426656e7052SJohn Crispin }
3427656e7052SJohn Crispin
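/* Clear the TX/RX enable bits in the given glo_cfg register and poll the
 * busy flags for up to roughly 200 ms (10 iterations of 20 ms) so the DMA
 * engine can drain before the rings are torn down.
 */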
3428656e7052SJohn Crispin static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3429656e7052SJohn Crispin {
3430656e7052SJohn Crispin u32 val;
3431656e7052SJohn Crispin int i;
3432656e7052SJohn Crispin
3433656e7052SJohn Crispin /* stop the dma engine */
3434e3e9652aSSean Wang spin_lock_bh(ð->page_lock);
3435656e7052SJohn Crispin val = mtk_r32(eth, glo_cfg);
3436656e7052SJohn Crispin mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3437656e7052SJohn Crispin glo_cfg);
3438e3e9652aSSean Wang spin_unlock_bh(ð->page_lock);
3439656e7052SJohn Crispin
3440656e7052SJohn Crispin /* wait for dma stop */
3441656e7052SJohn Crispin for (i = 0; i < 10; i++) {
3442656e7052SJohn Crispin val = mtk_r32(eth, glo_cfg);
3443656e7052SJohn Crispin if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3444656e7052SJohn Crispin msleep(20);
3445656e7052SJohn Crispin continue;
3446656e7052SJohn Crispin }
3447656e7052SJohn Crispin break;
3448656e7052SJohn Crispin }
3449656e7052SJohn Crispin }
3450656e7052SJohn Crispin
3451656e7052SJohn Crispin static int mtk_stop(struct net_device *dev)
3452656e7052SJohn Crispin {
3453656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
3454656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
34554ff1a3fcSLorenzo Bianconi int i;
3456656e7052SJohn Crispin
3457b8fc9f30SRené van Dorst phylink_stop(mac->phylink);
3458b8fc9f30SRené van Dorst
3459656e7052SJohn Crispin netif_tx_disable(dev);
3460b8fc9f30SRené van Dorst
3461b8fc9f30SRené van Dorst phylink_disconnect_phy(mac->phylink);
3462656e7052SJohn Crispin
3463656e7052SJohn Crispin /* only shut down DMA if this is the last user */
3464c6d4e63eSElena Reshetova if (!refcount_dec_and_test(ð->dma_refcnt))
3465656e7052SJohn Crispin return 0;
3466656e7052SJohn Crispin
34678d66a818SMarkLee mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
34688d66a818SMarkLee
34695cce0322SJohn Crispin mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3470160d3a9bSLorenzo Bianconi mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
347180673029SJohn Crispin napi_disable(ð->tx_napi);
3472656e7052SJohn Crispin napi_disable(ð->rx_napi);
3473656e7052SJohn Crispin
3474e9229ffdSFelix Fietkau cancel_work_sync(ð->rx_dim.work);
3475e9229ffdSFelix Fietkau cancel_work_sync(ð->tx_dim.work);
3476e9229ffdSFelix Fietkau
3477296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
34788cb42714SLorenzo Bianconi mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
34798cb42714SLorenzo Bianconi mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3480656e7052SJohn Crispin
3481656e7052SJohn Crispin mtk_dma_free(eth);
3482656e7052SJohn Crispin
34834ff1a3fcSLorenzo Bianconi for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
34844ff1a3fcSLorenzo Bianconi mtk_ppe_stop(eth->ppe[i]);
3485ba37b7caSFelix Fietkau
3486656e7052SJohn Crispin return 0;
3487656e7052SJohn Crispin }
3488656e7052SJohn Crispin
34897c26c20dSLorenzo Bianconi static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
34907c26c20dSLorenzo Bianconi struct netlink_ext_ack *extack)
34917c26c20dSLorenzo Bianconi {
34927c26c20dSLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
34937c26c20dSLorenzo Bianconi struct mtk_eth *eth = mac->hw;
34947c26c20dSLorenzo Bianconi struct bpf_prog *old_prog;
34957c26c20dSLorenzo Bianconi bool need_update;
34967c26c20dSLorenzo Bianconi
34977c26c20dSLorenzo Bianconi if (eth->hwlro) {
34987c26c20dSLorenzo Bianconi NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
34997c26c20dSLorenzo Bianconi return -EOPNOTSUPP;
35007c26c20dSLorenzo Bianconi }
35017c26c20dSLorenzo Bianconi
35027c26c20dSLorenzo Bianconi if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
35037c26c20dSLorenzo Bianconi NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
35047c26c20dSLorenzo Bianconi return -EOPNOTSUPP;
35057c26c20dSLorenzo Bianconi }
35067c26c20dSLorenzo Bianconi
35077c26c20dSLorenzo Bianconi need_update = !!eth->prog != !!prog;
35087c26c20dSLorenzo Bianconi if (netif_running(dev) && need_update)
35097c26c20dSLorenzo Bianconi mtk_stop(dev);
35107c26c20dSLorenzo Bianconi
35117c26c20dSLorenzo Bianconi old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
35127c26c20dSLorenzo Bianconi if (old_prog)
35137c26c20dSLorenzo Bianconi bpf_prog_put(old_prog);
35147c26c20dSLorenzo Bianconi
35157c26c20dSLorenzo Bianconi if (netif_running(dev) && need_update)
35167c26c20dSLorenzo Bianconi return mtk_open(dev);
35177c26c20dSLorenzo Bianconi
35187c26c20dSLorenzo Bianconi return 0;
35197c26c20dSLorenzo Bianconi }
35207c26c20dSLorenzo Bianconi
35217c26c20dSLorenzo Bianconi static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
35227c26c20dSLorenzo Bianconi {
35237c26c20dSLorenzo Bianconi switch (xdp->command) {
35247c26c20dSLorenzo Bianconi case XDP_SETUP_PROG:
35257c26c20dSLorenzo Bianconi return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
35267c26c20dSLorenzo Bianconi default:
35277c26c20dSLorenzo Bianconi return -EINVAL;
35287c26c20dSLorenzo Bianconi }
35297c26c20dSLorenzo Bianconi }
35307c26c20dSLorenzo Bianconi
35312a8307aaSSean Wang static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
35322a8307aaSSean Wang {
35332a8307aaSSean Wang regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
35342a8307aaSSean Wang reset_bits,
35352a8307aaSSean Wang reset_bits);
35362a8307aaSSean Wang
35372a8307aaSSean Wang usleep_range(1000, 1100);
35382a8307aaSSean Wang regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
35392a8307aaSSean Wang reset_bits,
35402a8307aaSSean Wang ~reset_bits);
35412a8307aaSSean Wang mdelay(10);
35422a8307aaSSean Wang }
35432a8307aaSSean Wang
35442ec50f57SSean Wang static void mtk_clk_disable(struct mtk_eth *eth)
35452ec50f57SSean Wang {
35462ec50f57SSean Wang int clk;
35472ec50f57SSean Wang
35482ec50f57SSean Wang for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
35492ec50f57SSean Wang clk_disable_unprepare(eth->clks[clk]);
35502ec50f57SSean Wang }
35512ec50f57SSean Wang
35522ec50f57SSean Wang static int mtk_clk_enable(struct mtk_eth *eth)
35532ec50f57SSean Wang {
35542ec50f57SSean Wang int clk, ret;
35552ec50f57SSean Wang
35562ec50f57SSean Wang for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
35572ec50f57SSean Wang ret = clk_prepare_enable(eth->clks[clk]);
35582ec50f57SSean Wang if (ret)
35592ec50f57SSean Wang goto err_disable_clks;
35602ec50f57SSean Wang }
35612ec50f57SSean Wang
35622ec50f57SSean Wang return 0;
35632ec50f57SSean Wang
35642ec50f57SSean Wang err_disable_clks:
35652ec50f57SSean Wang while (--clk >= 0)
35662ec50f57SSean Wang clk_disable_unprepare(eth->clks[clk]);
35672ec50f57SSean Wang
35682ec50f57SSean Wang return ret;
35692ec50f57SSean Wang }
35702ec50f57SSean Wang
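/* Net DIM worker for the RX path: translate the moderation profile picked
 * by the DIM algorithm into the PDMA (and, on QDMA-capable SoCs, also the
 * QDMA) delay-interrupt register. The delay timer appears to tick in 20 us
 * units, hence DIV_ROUND_UP(usec, 20) - e.g. a 50 us target becomes 3
 * ticks. mtk_dim_tx() below is the TX-side counterpart.
 */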
3571e9229ffdSFelix Fietkau static void mtk_dim_rx(struct work_struct *work)
3572e9229ffdSFelix Fietkau {
3573e9229ffdSFelix Fietkau struct dim *dim = container_of(work, struct dim, work);
3574e9229ffdSFelix Fietkau struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
35758cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3576e9229ffdSFelix Fietkau struct dim_cq_moder cur_profile;
3577e9229ffdSFelix Fietkau u32 val, cur;
3578e9229ffdSFelix Fietkau
3579e9229ffdSFelix Fietkau cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3580e9229ffdSFelix Fietkau dim->profile_ix);
3581e9229ffdSFelix Fietkau spin_lock_bh(ð->dim_lock);
3582e9229ffdSFelix Fietkau
35838cb42714SLorenzo Bianconi val = mtk_r32(eth, reg_map->pdma.delay_irq);
3584e9229ffdSFelix Fietkau val &= MTK_PDMA_DELAY_TX_MASK;
3585e9229ffdSFelix Fietkau val |= MTK_PDMA_DELAY_RX_EN;
3586e9229ffdSFelix Fietkau
3587e9229ffdSFelix Fietkau cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3588e9229ffdSFelix Fietkau val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3589e9229ffdSFelix Fietkau
3590e9229ffdSFelix Fietkau cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3591e9229ffdSFelix Fietkau val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3592e9229ffdSFelix Fietkau
35938cb42714SLorenzo Bianconi mtk_w32(eth, val, reg_map->pdma.delay_irq);
3594430bfe05SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
35958cb42714SLorenzo Bianconi mtk_w32(eth, val, reg_map->qdma.delay_irq);
3596e9229ffdSFelix Fietkau
3597e9229ffdSFelix Fietkau spin_unlock_bh(ð->dim_lock);
3598e9229ffdSFelix Fietkau
3599e9229ffdSFelix Fietkau dim->state = DIM_START_MEASURE;
3600e9229ffdSFelix Fietkau }
3601e9229ffdSFelix Fietkau
3602e9229ffdSFelix Fietkau static void mtk_dim_tx(struct work_struct *work)
3603e9229ffdSFelix Fietkau {
3604e9229ffdSFelix Fietkau struct dim *dim = container_of(work, struct dim, work);
3605e9229ffdSFelix Fietkau struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
36068cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3607e9229ffdSFelix Fietkau struct dim_cq_moder cur_profile;
3608e9229ffdSFelix Fietkau u32 val, cur;
3609e9229ffdSFelix Fietkau
3610e9229ffdSFelix Fietkau cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3611e9229ffdSFelix Fietkau dim->profile_ix);
3612e9229ffdSFelix Fietkau spin_lock_bh(ð->dim_lock);
3613e9229ffdSFelix Fietkau
36148cb42714SLorenzo Bianconi val = mtk_r32(eth, reg_map->pdma.delay_irq);
3615e9229ffdSFelix Fietkau val &= MTK_PDMA_DELAY_RX_MASK;
3616e9229ffdSFelix Fietkau val |= MTK_PDMA_DELAY_TX_EN;
3617e9229ffdSFelix Fietkau
3618e9229ffdSFelix Fietkau cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3619e9229ffdSFelix Fietkau val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3620e9229ffdSFelix Fietkau
3621e9229ffdSFelix Fietkau cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3622e9229ffdSFelix Fietkau val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3623e9229ffdSFelix Fietkau
36248cb42714SLorenzo Bianconi mtk_w32(eth, val, reg_map->pdma.delay_irq);
3625430bfe05SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
36268cb42714SLorenzo Bianconi mtk_w32(eth, val, reg_map->qdma.delay_irq);
3627e9229ffdSFelix Fietkau
3628e9229ffdSFelix Fietkau spin_unlock_bh(ð->dim_lock);
3629e9229ffdSFelix Fietkau
3630e9229ffdSFelix Fietkau dim->state = DIM_START_MEASURE;
3631e9229ffdSFelix Fietkau }
3632e9229ffdSFelix Fietkau
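/* Map the requested maximum RX frame length onto the discrete
 * MAC_MCR_MAX_RX buckets (1518, 1536, 1552 or 2048 bytes); for example, a
 * standard 1500-byte MTU plus the driver's RX header overhead lands in the
 * 1536-byte bucket. The MAC control register is only rewritten when the
 * value actually changes.
 */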
3633b677d6c7SLorenzo Bianconi static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3634b677d6c7SLorenzo Bianconi {
3635b677d6c7SLorenzo Bianconi struct mtk_eth *eth = mac->hw;
3636b677d6c7SLorenzo Bianconi u32 mcr_cur, mcr_new;
3637b677d6c7SLorenzo Bianconi
3638b677d6c7SLorenzo Bianconi if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3639b677d6c7SLorenzo Bianconi return;
3640b677d6c7SLorenzo Bianconi
3641b677d6c7SLorenzo Bianconi mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3642b677d6c7SLorenzo Bianconi mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3643b677d6c7SLorenzo Bianconi
3644b677d6c7SLorenzo Bianconi if (val <= 1518)
3645b677d6c7SLorenzo Bianconi mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3646b677d6c7SLorenzo Bianconi else if (val <= 1536)
3647b677d6c7SLorenzo Bianconi mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3648b677d6c7SLorenzo Bianconi else if (val <= 1552)
3649b677d6c7SLorenzo Bianconi mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3650b677d6c7SLorenzo Bianconi else
3651b677d6c7SLorenzo Bianconi mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3652b677d6c7SLorenzo Bianconi
3653b677d6c7SLorenzo Bianconi if (mcr_new != mcr_cur)
3654b677d6c7SLorenzo Bianconi mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3655b677d6c7SLorenzo Bianconi }
3656b677d6c7SLorenzo Bianconi
3657bccd19bcSLorenzo Bianconi static void mtk_hw_reset(struct mtk_eth *eth)
3658bccd19bcSLorenzo Bianconi {
3659bccd19bcSLorenzo Bianconi u32 val;
3660bccd19bcSLorenzo Bianconi
366188c1e6efSDaniel Golle if (mtk_is_netsys_v2_or_greater(eth))
3662bccd19bcSLorenzo Bianconi regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
366388c1e6efSDaniel Golle
366488c1e6efSDaniel Golle if (mtk_is_netsys_v3_or_greater(eth)) {
366588c1e6efSDaniel Golle val = RSTCTRL_PPE0_V3;
366688c1e6efSDaniel Golle
366788c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
366888c1e6efSDaniel Golle val |= RSTCTRL_PPE1_V3;
366988c1e6efSDaniel Golle
367088c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
367188c1e6efSDaniel Golle val |= RSTCTRL_PPE2;
367288c1e6efSDaniel Golle
367388c1e6efSDaniel Golle val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
367488c1e6efSDaniel Golle } else if (mtk_is_netsys_v2_or_greater(eth)) {
3675bccd19bcSLorenzo Bianconi val = RSTCTRL_PPE0_V2;
367688c1e6efSDaniel Golle
367788c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
367888c1e6efSDaniel Golle val |= RSTCTRL_PPE1;
3679bccd19bcSLorenzo Bianconi } else {
3680bccd19bcSLorenzo Bianconi val = RSTCTRL_PPE0;
3681bccd19bcSLorenzo Bianconi }
3682bccd19bcSLorenzo Bianconi
3683bccd19bcSLorenzo Bianconi ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3684bccd19bcSLorenzo Bianconi
368588c1e6efSDaniel Golle if (mtk_is_netsys_v3_or_greater(eth))
368688c1e6efSDaniel Golle regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
368788c1e6efSDaniel Golle 0x6f8ff);
368888c1e6efSDaniel Golle else if (mtk_is_netsys_v2_or_greater(eth))
3689bccd19bcSLorenzo Bianconi regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3690bccd19bcSLorenzo Bianconi 0x3ffffff);
3691bccd19bcSLorenzo Bianconi }
3692bccd19bcSLorenzo Bianconi
3693a9724b9cSLorenzo Bianconi static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3694a9724b9cSLorenzo Bianconi {
3695a9724b9cSLorenzo Bianconi u32 val;
3696a9724b9cSLorenzo Bianconi
3697a9724b9cSLorenzo Bianconi regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3698a9724b9cSLorenzo Bianconi return val;
3699a9724b9cSLorenzo Bianconi }
3700a9724b9cSLorenzo Bianconi
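/* Two-stage warm reset of the frame engine: assert RSTCTRL_FE and wait for
 * it to latch, assert the generation-specific PPE/WDMA/ETH reset bits, then
 * de-assert everything again. Falls back to a full mtk_hw_reset() if the
 * first stage times out.
 */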
3701a9724b9cSLorenzo Bianconi static void mtk_hw_warm_reset(struct mtk_eth *eth)
3702a9724b9cSLorenzo Bianconi {
3703a9724b9cSLorenzo Bianconi u32 rst_mask, val;
3704a9724b9cSLorenzo Bianconi
3705a9724b9cSLorenzo Bianconi regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3706a9724b9cSLorenzo Bianconi RSTCTRL_FE);
3707a9724b9cSLorenzo Bianconi if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3708a9724b9cSLorenzo Bianconi val & RSTCTRL_FE, 1, 1000)) {
3709a9724b9cSLorenzo Bianconi dev_err(eth->dev, "warm reset failed\n");
3710a9724b9cSLorenzo Bianconi mtk_hw_reset(eth);
3711a9724b9cSLorenzo Bianconi return;
3712a9724b9cSLorenzo Bianconi }
3713a9724b9cSLorenzo Bianconi
371488c1e6efSDaniel Golle if (mtk_is_netsys_v3_or_greater(eth)) {
371588c1e6efSDaniel Golle rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
371688c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
371788c1e6efSDaniel Golle rst_mask |= RSTCTRL_PPE1_V3;
371888c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
371988c1e6efSDaniel Golle rst_mask |= RSTCTRL_PPE2;
3720a9724b9cSLorenzo Bianconi
372188c1e6efSDaniel Golle rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
372288c1e6efSDaniel Golle } else if (mtk_is_netsys_v2_or_greater(eth)) {
372388c1e6efSDaniel Golle rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3724a9724b9cSLorenzo Bianconi if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3725a9724b9cSLorenzo Bianconi rst_mask |= RSTCTRL_PPE1;
372688c1e6efSDaniel Golle } else {
372788c1e6efSDaniel Golle rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
372888c1e6efSDaniel Golle }
3729a9724b9cSLorenzo Bianconi
3730a9724b9cSLorenzo Bianconi regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3731a9724b9cSLorenzo Bianconi
3732a9724b9cSLorenzo Bianconi udelay(1);
3733a9724b9cSLorenzo Bianconi val = mtk_hw_reset_read(eth);
3734a9724b9cSLorenzo Bianconi if (!(val & rst_mask))
3735a9724b9cSLorenzo Bianconi dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3736a9724b9cSLorenzo Bianconi val, rst_mask);
3737a9724b9cSLorenzo Bianconi
3738a9724b9cSLorenzo Bianconi rst_mask |= RSTCTRL_FE;
3739a9724b9cSLorenzo Bianconi regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3740a9724b9cSLorenzo Bianconi
3741a9724b9cSLorenzo Bianconi udelay(1);
3742a9724b9cSLorenzo Bianconi val = mtk_hw_reset_read(eth);
3743a9724b9cSLorenzo Bianconi if (val & rst_mask)
3744a9724b9cSLorenzo Bianconi dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3745a9724b9cSLorenzo Bianconi val, rst_mask);
3746a9724b9cSLorenzo Bianconi }
3747a9724b9cSLorenzo Bianconi
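/* Heuristic DMA hang detection used by the reset monitor: sample WDMA, QDMA
 * and ADMA state registers and report a hang (which schedules the reset
 * work) only after the same stuck condition has been observed on more than
 * two consecutive monitor passes.
 */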
374893b2591aSLorenzo Bianconi static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
374993b2591aSLorenzo Bianconi {
375093b2591aSLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
375193b2591aSLorenzo Bianconi bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
375293b2591aSLorenzo Bianconi bool oq_hang, cdm1_busy, adma_busy;
375393b2591aSLorenzo Bianconi bool wtx_busy, cdm_full, oq_free;
375493b2591aSLorenzo Bianconi u32 wdidx, val, gdm1_fc, gdm2_fc;
375593b2591aSLorenzo Bianconi bool qfsm_hang, qfwd_hang;
375693b2591aSLorenzo Bianconi bool ret = false;
375793b2591aSLorenzo Bianconi
375893b2591aSLorenzo Bianconi if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
375993b2591aSLorenzo Bianconi return false;
376093b2591aSLorenzo Bianconi
376193b2591aSLorenzo Bianconi /* WDMA sanity checks */
376293b2591aSLorenzo Bianconi wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
376393b2591aSLorenzo Bianconi
376493b2591aSLorenzo Bianconi val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
376593b2591aSLorenzo Bianconi wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
376693b2591aSLorenzo Bianconi
376793b2591aSLorenzo Bianconi val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
376893b2591aSLorenzo Bianconi cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
376993b2591aSLorenzo Bianconi
377093b2591aSLorenzo Bianconi oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
377193b2591aSLorenzo Bianconi !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
377293b2591aSLorenzo Bianconi !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
377393b2591aSLorenzo Bianconi
377493b2591aSLorenzo Bianconi if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
377593b2591aSLorenzo Bianconi if (++eth->reset.wdma_hang_count > 2) {
377693b2591aSLorenzo Bianconi eth->reset.wdma_hang_count = 0;
377793b2591aSLorenzo Bianconi ret = true;
377893b2591aSLorenzo Bianconi }
377993b2591aSLorenzo Bianconi goto out;
378093b2591aSLorenzo Bianconi }
378193b2591aSLorenzo Bianconi
378293b2591aSLorenzo Bianconi /* QDMA sanity checks */
378393b2591aSLorenzo Bianconi qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
378493b2591aSLorenzo Bianconi qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
378593b2591aSLorenzo Bianconi
378693b2591aSLorenzo Bianconi gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
378793b2591aSLorenzo Bianconi gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
378893b2591aSLorenzo Bianconi gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
378993b2591aSLorenzo Bianconi gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
379093b2591aSLorenzo Bianconi gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
379193b2591aSLorenzo Bianconi gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
379293b2591aSLorenzo Bianconi
379393b2591aSLorenzo Bianconi if (qfsm_hang && qfwd_hang &&
379493b2591aSLorenzo Bianconi ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
379593b2591aSLorenzo Bianconi (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
379693b2591aSLorenzo Bianconi if (++eth->reset.qdma_hang_count > 2) {
379793b2591aSLorenzo Bianconi eth->reset.qdma_hang_count = 0;
379893b2591aSLorenzo Bianconi ret = true;
379993b2591aSLorenzo Bianconi }
380093b2591aSLorenzo Bianconi goto out;
380193b2591aSLorenzo Bianconi }
380293b2591aSLorenzo Bianconi
380393b2591aSLorenzo Bianconi /* ADMA sanity checks */
380493b2591aSLorenzo Bianconi oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
380593b2591aSLorenzo Bianconi cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
380693b2591aSLorenzo Bianconi adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
380793b2591aSLorenzo Bianconi !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
380893b2591aSLorenzo Bianconi
380993b2591aSLorenzo Bianconi if (oq_hang && cdm1_busy && adma_busy) {
381093b2591aSLorenzo Bianconi if (++eth->reset.adma_hang_count > 2) {
381193b2591aSLorenzo Bianconi eth->reset.adma_hang_count = 0;
381293b2591aSLorenzo Bianconi ret = true;
381393b2591aSLorenzo Bianconi }
381493b2591aSLorenzo Bianconi goto out;
381593b2591aSLorenzo Bianconi }
381693b2591aSLorenzo Bianconi
381793b2591aSLorenzo Bianconi eth->reset.wdma_hang_count = 0;
381893b2591aSLorenzo Bianconi eth->reset.qdma_hang_count = 0;
381993b2591aSLorenzo Bianconi eth->reset.adma_hang_count = 0;
382093b2591aSLorenzo Bianconi out:
382193b2591aSLorenzo Bianconi eth->reset.wdidx = wdidx;
382293b2591aSLorenzo Bianconi
382393b2591aSLorenzo Bianconi return ret;
382493b2591aSLorenzo Bianconi }
382593b2591aSLorenzo Bianconi
382693b2591aSLorenzo Bianconi static void mtk_hw_reset_monitor_work(struct work_struct *work)
382793b2591aSLorenzo Bianconi {
382893b2591aSLorenzo Bianconi struct delayed_work *del_work = to_delayed_work(work);
382993b2591aSLorenzo Bianconi struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
383093b2591aSLorenzo Bianconi reset.monitor_work);
383193b2591aSLorenzo Bianconi
383293b2591aSLorenzo Bianconi if (test_bit(MTK_RESETTING, ð->state))
383393b2591aSLorenzo Bianconi goto out;
383493b2591aSLorenzo Bianconi
383593b2591aSLorenzo Bianconi /* DMA stuck checks */
383693b2591aSLorenzo Bianconi if (mtk_hw_check_dma_hang(eth))
383793b2591aSLorenzo Bianconi schedule_work(ð->pending_work);
383893b2591aSLorenzo Bianconi
383993b2591aSLorenzo Bianconi out:
384093b2591aSLorenzo Bianconi schedule_delayed_work(ð->reset.monitor_work,
384193b2591aSLorenzo Bianconi MTK_DMA_MONITOR_TIMEOUT);
384293b2591aSLorenzo Bianconi }
384393b2591aSLorenzo Bianconi
3844a9724b9cSLorenzo Bianconi static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3845656e7052SJohn Crispin {
3846d776a57eSFelix Fietkau u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3847d776a57eSFelix Fietkau ETHSYS_DMA_AG_MAP_PPE;
38488cb42714SLorenzo Bianconi const struct mtk_reg_map *reg_map = eth->soc->reg_map;
38492ec50f57SSean Wang int i, val, ret;
38509ea4d311SSean Wang
385106127504SLorenzo Bianconi if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state))
38529ea4d311SSean Wang return 0;
385385574dbfSSean Wang
385406127504SLorenzo Bianconi if (!reset) {
385526a2ad8aSSean Wang pm_runtime_enable(eth->dev);
385626a2ad8aSSean Wang pm_runtime_get_sync(eth->dev);
385726a2ad8aSSean Wang
38582ec50f57SSean Wang ret = mtk_clk_enable(eth);
38592ec50f57SSean Wang if (ret)
38602ec50f57SSean Wang goto err_disable_pm;
386106127504SLorenzo Bianconi }
38622ec50f57SSean Wang
3863d776a57eSFelix Fietkau if (eth->ethsys)
3864d776a57eSFelix Fietkau regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3865d776a57eSFelix Fietkau of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3866d776a57eSFelix Fietkau
3867296c9120SStefan Roese if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3868296c9120SStefan Roese ret = device_reset(eth->dev);
3869296c9120SStefan Roese if (ret) {
3870296c9120SStefan Roese dev_err(eth->dev, "MAC reset failed!\n");
3871296c9120SStefan Roese goto err_disable_pm;
3872296c9120SStefan Roese }
3873296c9120SStefan Roese
3874430bfe05SStefan Roese /* set interrupt delays based on current Net DIM sample */
3875430bfe05SStefan Roese mtk_dim_rx(ð->rx_dim.work);
3876430bfe05SStefan Roese mtk_dim_tx(ð->tx_dim.work);
3877430bfe05SStefan Roese
3878296c9120SStefan Roese /* disable delay and normal interrupt */
3879296c9120SStefan Roese mtk_tx_irq_disable(eth, ~0);
3880296c9120SStefan Roese mtk_rx_irq_disable(eth, ~0);
3881296c9120SStefan Roese
3882296c9120SStefan Roese return 0;
3883296c9120SStefan Roese }
3884296c9120SStefan Roese
3885a9724b9cSLorenzo Bianconi msleep(100);
3886a9724b9cSLorenzo Bianconi
3887a9724b9cSLorenzo Bianconi if (reset)
3888a9724b9cSLorenzo Bianconi mtk_hw_warm_reset(eth);
3889a9724b9cSLorenzo Bianconi else
3890bccd19bcSLorenzo Bianconi mtk_hw_reset(eth);
3891160d3a9bSLorenzo Bianconi
3892a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
3893160d3a9bSLorenzo Bianconi /* Set FE to PDMAv2 if necessary */
3894160d3a9bSLorenzo Bianconi val = mtk_r32(eth, MTK_FE_GLO_MISC);
3895160d3a9bSLorenzo Bianconi mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3896160d3a9bSLorenzo Bianconi }
3897656e7052SJohn Crispin
3898243dc5fbSSean Wang if (eth->pctl) {
3899656e7052SJohn Crispin /* Set GE2 driving and slew rate */
3900656e7052SJohn Crispin regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3901656e7052SJohn Crispin
3902656e7052SJohn Crispin /* set GE2 TDSEL */
3903656e7052SJohn Crispin regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3904656e7052SJohn Crispin
3905656e7052SJohn Crispin /* set GE2 TUNE */
3906656e7052SJohn Crispin regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3907243dc5fbSSean Wang }
3908656e7052SJohn Crispin
39097352e252SSean Wang /* Set link down as the default for each GMAC. Each MAC's own MCR is
3910b8fc9f30SRené van Dorst * set up with the appropriate value when mtk_mac_config() is invoked.
39127352e252SSean Wang */
3913e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
3914b677d6c7SLorenzo Bianconi struct net_device *dev = eth->netdev[i];
3915b677d6c7SLorenzo Bianconi
3916e05fd627SLorenzo Bianconi if (!dev)
3917e05fd627SLorenzo Bianconi continue;
3918b677d6c7SLorenzo Bianconi
3919e05fd627SLorenzo Bianconi mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3920e05fd627SLorenzo Bianconi mtk_set_mcr_max_rx(netdev_priv(dev),
3921e05fd627SLorenzo Bianconi dev->mtu + MTK_RX_ETH_HLEN);
3922b677d6c7SLorenzo Bianconi }
3923656e7052SJohn Crispin
392487e3df49SSean Wang /* Tell the CDM to parse the MTK special tag on packets coming from
392587e3df49SSean Wang * the CPU; this also works for untagged packets.
392687e3df49SSean Wang */
392787e3df49SSean Wang val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
392887e3df49SSean Wang mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3929a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v1(eth)) {
39302d7605a7SFelix Fietkau val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
39312d7605a7SFelix Fietkau mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
393287e3df49SSean Wang
3933656e7052SJohn Crispin mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3934c6d96df9SFelix Fietkau }
3935656e7052SJohn Crispin
3936e9229ffdSFelix Fietkau /* set interrupt delays based on current Net DIM sample */
3937e9229ffdSFelix Fietkau mtk_dim_rx(ð->rx_dim.work);
3938e9229ffdSFelix Fietkau mtk_dim_tx(ð->tx_dim.work);
3939671d41e6SJohn Crispin
3940656e7052SJohn Crispin /* disable delay and normal interrupt */
39415cce0322SJohn Crispin mtk_tx_irq_disable(eth, ~0);
39425cce0322SJohn Crispin mtk_rx_irq_disable(eth, ~0);
3943656e7052SJohn Crispin
3944656e7052SJohn Crispin /* FE int grouping */
39458cb42714SLorenzo Bianconi mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3946160d3a9bSLorenzo Bianconi mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
39478cb42714SLorenzo Bianconi mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3948160d3a9bSLorenzo Bianconi mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
394980673029SJohn Crispin mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3950656e7052SJohn Crispin
39511953f134SLorenzo Bianconi if (mtk_is_netsys_v3_or_greater(eth)) {
39521953f134SLorenzo Bianconi /* PSE should not drop port1, port8 and port9 packets */
39531953f134SLorenzo Bianconi mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
39541953f134SLorenzo Bianconi
39551953f134SLorenzo Bianconi /* GDM and CDM Threshold */
39561953f134SLorenzo Bianconi mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
39571953f134SLorenzo Bianconi mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
39581953f134SLorenzo Bianconi
39591953f134SLorenzo Bianconi /* Disable GDM1 RX CRC stripping */
39601953f134SLorenzo Bianconi mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
39611953f134SLorenzo Bianconi
39621953f134SLorenzo Bianconi /* The PSE GDM3 MIB counters have incorrect hardware default values,
39631953f134SLorenzo Bianconi * so read-clear them beforehand so that ethtool does not retrieve
39641953f134SLorenzo Bianconi * wrong MIB values.
39651953f134SLorenzo Bianconi */
39661953f134SLorenzo Bianconi for (i = 0; i < 0x80; i += 0x4)
39671953f134SLorenzo Bianconi mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
39681953f134SLorenzo Bianconi } else if (!mtk_is_netsys_v1(eth)) {
3969f4b2fa2cSFelix Fietkau /* PSE should not drop port8 and port9 packets from WDMA Tx */
3970160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3971160d3a9bSLorenzo Bianconi
3972f4b2fa2cSFelix Fietkau /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3973f4b2fa2cSFelix Fietkau mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3974f4b2fa2cSFelix Fietkau
3975160d3a9bSLorenzo Bianconi /* PSE Free Queue Flow Control */
3976160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3977160d3a9bSLorenzo Bianconi
3978160d3a9bSLorenzo Bianconi /* PSE config input queue threshold */
3979160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3980160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3981160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3982160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3983160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3984160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3985160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3986160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3987160d3a9bSLorenzo Bianconi
3988160d3a9bSLorenzo Bianconi /* PSE config output queue threshold */
3989160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3990160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3991160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3992160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3993160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3994160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3995160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3996160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3997160d3a9bSLorenzo Bianconi
3998160d3a9bSLorenzo Bianconi /* GDM and CDM Threshold */
3999160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4000160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4001160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4002160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4003160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4004160d3a9bSLorenzo Bianconi mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4005160d3a9bSLorenzo Bianconi }
4006160d3a9bSLorenzo Bianconi
4007656e7052SJohn Crispin return 0;
40082ec50f57SSean Wang
40092ec50f57SSean Wang err_disable_pm:
401006127504SLorenzo Bianconi if (!reset) {
40112ec50f57SSean Wang pm_runtime_put_sync(eth->dev);
40122ec50f57SSean Wang pm_runtime_disable(eth->dev);
401306127504SLorenzo Bianconi }
40142ec50f57SSean Wang
40152ec50f57SSean Wang return ret;
4016656e7052SJohn Crispin }
4017656e7052SJohn Crispin
4018bf253fb7SSean Wang static int mtk_hw_deinit(struct mtk_eth *eth)
4019bf253fb7SSean Wang {
40209ea4d311SSean Wang if (!test_and_clear_bit(MTK_HW_INIT, ð->state))
40219ea4d311SSean Wang return 0;
40229ea4d311SSean Wang
40232ec50f57SSean Wang mtk_clk_disable(eth);
4024bf253fb7SSean Wang
402526a2ad8aSSean Wang pm_runtime_put_sync(eth->dev);
402626a2ad8aSSean Wang pm_runtime_disable(eth->dev);
402726a2ad8aSSean Wang
4028bf253fb7SSean Wang return 0;
4029bf253fb7SSean Wang }
4030bf253fb7SSean Wang
4031656e7052SJohn Crispin static void mtk_uninit(struct net_device *dev)
4032656e7052SJohn Crispin {
4033656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
4034656e7052SJohn Crispin struct mtk_eth *eth = mac->hw;
4035656e7052SJohn Crispin
4036b8fc9f30SRené van Dorst phylink_disconnect_phy(mac->phylink);
40375cce0322SJohn Crispin mtk_tx_irq_disable(eth, ~0);
40385cce0322SJohn Crispin mtk_rx_irq_disable(eth, ~0);
4039656e7052SJohn Crispin }
4040656e7052SJohn Crispin
40414fd59792SDENG Qingfang static int mtk_change_mtu(struct net_device *dev, int new_mtu)
40424fd59792SDENG Qingfang {
40434fd59792SDENG Qingfang int length = new_mtu + MTK_RX_ETH_HLEN;
40444fd59792SDENG Qingfang struct mtk_mac *mac = netdev_priv(dev);
40454fd59792SDENG Qingfang struct mtk_eth *eth = mac->hw;
40464fd59792SDENG Qingfang
40477c26c20dSLorenzo Bianconi if (rcu_access_pointer(eth->prog) &&
40487c26c20dSLorenzo Bianconi length > MTK_PP_MAX_BUF_SIZE) {
40497c26c20dSLorenzo Bianconi netdev_err(dev, "Invalid MTU for XDP mode\n");
40507c26c20dSLorenzo Bianconi return -EINVAL;
40517c26c20dSLorenzo Bianconi }
40527c26c20dSLorenzo Bianconi
4053b677d6c7SLorenzo Bianconi mtk_set_mcr_max_rx(mac, length);
40544fd59792SDENG Qingfang dev->mtu = new_mtu;
40554fd59792SDENG Qingfang
40564fd59792SDENG Qingfang return 0;
40574fd59792SDENG Qingfang }
40584fd59792SDENG Qingfang
4059656e7052SJohn Crispin static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4060656e7052SJohn Crispin {
4061b8fc9f30SRené van Dorst struct mtk_mac *mac = netdev_priv(dev);
4062b8fc9f30SRené van Dorst
4063656e7052SJohn Crispin switch (cmd) {
4064656e7052SJohn Crispin case SIOCGMIIPHY:
4065656e7052SJohn Crispin case SIOCGMIIREG:
4066656e7052SJohn Crispin case SIOCSMIIREG:
4067b8fc9f30SRené van Dorst return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4068656e7052SJohn Crispin default:
4069656e7052SJohn Crispin break;
4070656e7052SJohn Crispin }
4071656e7052SJohn Crispin
4072656e7052SJohn Crispin return -EOPNOTSUPP;
4073656e7052SJohn Crispin }
4074656e7052SJohn Crispin
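/* Quiesce the hardware before a frame-engine reset: force the PSE ports
 * towards the PPEs link-down, let the PPEs flush their state, mask all
 * NETSYS interrupts and drop the forced-link bit on the GMACs.
 */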
407506127504SLorenzo Bianconi static void mtk_prepare_for_reset(struct mtk_eth *eth)
407606127504SLorenzo Bianconi {
407706127504SLorenzo Bianconi u32 val;
407806127504SLorenzo Bianconi int i;
407906127504SLorenzo Bianconi
408088c1e6efSDaniel Golle /* set FE PPE ports link down */
408188c1e6efSDaniel Golle for (i = MTK_GMAC1_ID;
408288c1e6efSDaniel Golle i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
408388c1e6efSDaniel Golle i += 2) {
408488c1e6efSDaniel Golle val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
408506127504SLorenzo Bianconi if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
408688c1e6efSDaniel Golle val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
408788c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
408888c1e6efSDaniel Golle val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
408988c1e6efSDaniel Golle mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
409088c1e6efSDaniel Golle }
409106127504SLorenzo Bianconi
409206127504SLorenzo Bianconi /* adjust PPE configurations to prepare for reset */
409306127504SLorenzo Bianconi for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
409406127504SLorenzo Bianconi mtk_ppe_prepare_reset(eth->ppe[i]);
409506127504SLorenzo Bianconi
409606127504SLorenzo Bianconi /* disable NETSYS interrupts */
409706127504SLorenzo Bianconi mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
409806127504SLorenzo Bianconi
409906127504SLorenzo Bianconi /* force link down GMAC */
410006127504SLorenzo Bianconi for (i = 0; i < 2; i++) {
410106127504SLorenzo Bianconi val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
410206127504SLorenzo Bianconi mtk_w32(eth, val, MTK_MAC_MCR(i));
410306127504SLorenzo Bianconi }
410406127504SLorenzo Bianconi }
410506127504SLorenzo Bianconi
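/* Deferred reset worker, scheduled from the DMA hang monitor and other
 * error paths: under RTNL, stop every running netdev, warm-reset the
 * hardware through mtk_hw_init(eth, true) and bring the previously running
 * netdevs back up, restoring the PSE port link state afterwards.
 */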
4106656e7052SJohn Crispin static void mtk_pending_work(struct work_struct *work)
4107656e7052SJohn Crispin {
41087c78b4adSJohn Crispin struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4109e7d425dcSJohn Crispin unsigned long restart = 0;
411006127504SLorenzo Bianconi u32 val;
411106127504SLorenzo Bianconi int i;
4112656e7052SJohn Crispin
4113656e7052SJohn Crispin rtnl_lock();
4114ec8cd134SLorenzo Bianconi set_bit(MTK_RESETTING, ð->state);
4115dce6fa42SSean Wang
411606127504SLorenzo Bianconi mtk_prepare_for_reset(eth);
411708a764a7SLorenzo Bianconi mtk_wed_fe_reset();
411808a764a7SLorenzo Bianconi /* Run the reset preliminary configuration again to avoid any possible
411908a764a7SLorenzo Bianconi * race during the FE reset, since the reset can run with the RTNL lock released.
412008a764a7SLorenzo Bianconi */
412108a764a7SLorenzo Bianconi mtk_prepare_for_reset(eth);
412206127504SLorenzo Bianconi
4123e7d425dcSJohn Crispin /* stop all devices to make sure that dma is properly shut down */
4124e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
412506127504SLorenzo Bianconi if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4126e7d425dcSJohn Crispin continue;
412706127504SLorenzo Bianconi
4128e7d425dcSJohn Crispin mtk_stop(eth->netdev[i]);
4129e7d425dcSJohn Crispin __set_bit(i, &restart);
4130e7d425dcSJohn Crispin }
4131e7d425dcSJohn Crispin
413206127504SLorenzo Bianconi usleep_range(15000, 16000);
41339ea4d311SSean Wang
41349ea4d311SSean Wang if (eth->dev->pins)
41359ea4d311SSean Wang pinctrl_select_state(eth->dev->pins->p,
41369ea4d311SSean Wang eth->dev->pins->default_state);
4137a9724b9cSLorenzo Bianconi mtk_hw_init(eth, true);
41389ea4d311SSean Wang
4139e7d425dcSJohn Crispin /* restart DMA and enable IRQs */
4140e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
4141e05fd627SLorenzo Bianconi if (!eth->netdev[i] || !test_bit(i, &restart))
4142e7d425dcSJohn Crispin continue;
414306127504SLorenzo Bianconi
414406127504SLorenzo Bianconi if (mtk_open(eth->netdev[i])) {
4145e7d425dcSJohn Crispin netif_alert(eth, ifup, eth->netdev[i],
414606127504SLorenzo Bianconi "Driver up/down cycle failed\n");
4147e7d425dcSJohn Crispin dev_close(eth->netdev[i]);
4148e7d425dcSJohn Crispin }
4149656e7052SJohn Crispin }
4150dce6fa42SSean Wang
415188c1e6efSDaniel Golle /* set FE PPE ports link up */
415288c1e6efSDaniel Golle for (i = MTK_GMAC1_ID;
415388c1e6efSDaniel Golle i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
415488c1e6efSDaniel Golle i += 2) {
415588c1e6efSDaniel Golle val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
415606127504SLorenzo Bianconi if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
415788c1e6efSDaniel Golle val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
415888c1e6efSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
415988c1e6efSDaniel Golle val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
416088c1e6efSDaniel Golle
416188c1e6efSDaniel Golle mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
416288c1e6efSDaniel Golle }
4163dce6fa42SSean Wang
4164ec8cd134SLorenzo Bianconi clear_bit(MTK_RESETTING, ð->state);
4165dce6fa42SSean Wang
416608a764a7SLorenzo Bianconi mtk_wed_fe_reset_complete();
416708a764a7SLorenzo Bianconi
4168656e7052SJohn Crispin rtnl_unlock();
4169656e7052SJohn Crispin }
4170656e7052SJohn Crispin
41718a8a9e89SSean Wang static int mtk_free_dev(struct mtk_eth *eth)
4172656e7052SJohn Crispin {
4173656e7052SJohn Crispin int i;
4174656e7052SJohn Crispin
4175e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
4176656e7052SJohn Crispin if (!eth->netdev[i])
4177656e7052SJohn Crispin continue;
4178656e7052SJohn Crispin free_netdev(eth->netdev[i]);
4179656e7052SJohn Crispin }
41808a8a9e89SSean Wang
41812d7605a7SFelix Fietkau for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
41822d7605a7SFelix Fietkau if (!eth->dsa_meta[i])
41832d7605a7SFelix Fietkau break;
41842d7605a7SFelix Fietkau metadata_dst_free(eth->dsa_meta[i]);
41852d7605a7SFelix Fietkau }
41862d7605a7SFelix Fietkau
41878a8a9e89SSean Wang return 0;
41888a8a9e89SSean Wang }
41898a8a9e89SSean Wang
41908a8a9e89SSean Wang static int mtk_unreg_dev(struct mtk_eth *eth)
41918a8a9e89SSean Wang {
41928a8a9e89SSean Wang int i;
41938a8a9e89SSean Wang
4194e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
4195f63959c7SFelix Fietkau struct mtk_mac *mac;
41968a8a9e89SSean Wang if (!eth->netdev[i])
41978a8a9e89SSean Wang continue;
4198f63959c7SFelix Fietkau mac = netdev_priv(eth->netdev[i]);
4199f63959c7SFelix Fietkau if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4200f63959c7SFelix Fietkau unregister_netdevice_notifier(&mac->device_notifier);
42018a8a9e89SSean Wang unregister_netdev(eth->netdev[i]);
42028a8a9e89SSean Wang }
42038a8a9e89SSean Wang
42048a8a9e89SSean Wang return 0;
42058a8a9e89SSean Wang }
42068a8a9e89SSean Wang
42072a3ec7aeSDaniel Golle static void mtk_sgmii_destroy(struct mtk_eth *eth)
42082a3ec7aeSDaniel Golle {
42092a3ec7aeSDaniel Golle int i;
42102a3ec7aeSDaniel Golle
42112a3ec7aeSDaniel Golle for (i = 0; i < MTK_MAX_DEVS; i++)
42122a3ec7aeSDaniel Golle mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
42132a3ec7aeSDaniel Golle }
42142a3ec7aeSDaniel Golle
42158a8a9e89SSean Wang static int mtk_cleanup(struct mtk_eth *eth)
42168a8a9e89SSean Wang {
42172a3ec7aeSDaniel Golle mtk_sgmii_destroy(eth);
42188a8a9e89SSean Wang mtk_unreg_dev(eth);
42198a8a9e89SSean Wang mtk_free_dev(eth);
42207c78b4adSJohn Crispin cancel_work_sync(ð->pending_work);
422193b2591aSLorenzo Bianconi cancel_delayed_work_sync(ð->reset.monitor_work);
4222656e7052SJohn Crispin
4223656e7052SJohn Crispin return 0;
4224656e7052SJohn Crispin }
4225656e7052SJohn Crispin
42263a82e78cSBaoyou Xie static int mtk_get_link_ksettings(struct net_device *ndev,
42273e60b748SSean Wang struct ethtool_link_ksettings *cmd)
4228656e7052SJohn Crispin {
42293e60b748SSean Wang struct mtk_mac *mac = netdev_priv(ndev);
4230656e7052SJohn Crispin
4231dce6fa42SSean Wang if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4232dce6fa42SSean Wang return -EBUSY;
4233dce6fa42SSean Wang
4234b8fc9f30SRené van Dorst return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4235656e7052SJohn Crispin }
4236656e7052SJohn Crispin
42373a82e78cSBaoyou Xie static int mtk_set_link_ksettings(struct net_device *ndev,
42383e60b748SSean Wang const struct ethtool_link_ksettings *cmd)
4239656e7052SJohn Crispin {
42403e60b748SSean Wang struct mtk_mac *mac = netdev_priv(ndev);
4241656e7052SJohn Crispin
42423e60b748SSean Wang if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
42433e60b748SSean Wang return -EBUSY;
4244656e7052SJohn Crispin
4245b8fc9f30SRené van Dorst return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4246656e7052SJohn Crispin }
4247656e7052SJohn Crispin
4248656e7052SJohn Crispin static void mtk_get_drvinfo(struct net_device *dev,
4249656e7052SJohn Crispin struct ethtool_drvinfo *info)
4250656e7052SJohn Crispin {
4251656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
4252656e7052SJohn Crispin
4253f029c781SWolfram Sang strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4254f029c781SWolfram Sang strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4255656e7052SJohn Crispin info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4256656e7052SJohn Crispin }
4257656e7052SJohn Crispin
4258656e7052SJohn Crispin static u32 mtk_get_msglevel(struct net_device *dev)
4259656e7052SJohn Crispin {
4260656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
4261656e7052SJohn Crispin
4262656e7052SJohn Crispin return mac->hw->msg_enable;
4263656e7052SJohn Crispin }
4264656e7052SJohn Crispin
4265656e7052SJohn Crispin static void mtk_set_msglevel(struct net_device *dev, u32 value)
4266656e7052SJohn Crispin {
4267656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
4268656e7052SJohn Crispin
4269656e7052SJohn Crispin mac->hw->msg_enable = value;
4270656e7052SJohn Crispin }
4271656e7052SJohn Crispin
4272656e7052SJohn Crispin static int mtk_nway_reset(struct net_device *dev)
4273656e7052SJohn Crispin {
4274656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
4275656e7052SJohn Crispin
4276dce6fa42SSean Wang if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4277dce6fa42SSean Wang return -EBUSY;
4278dce6fa42SSean Wang
4279b8fc9f30SRené van Dorst if (!mac->phylink)
4280b8fc9f30SRené van Dorst return -ENOTSUPP;
4281656e7052SJohn Crispin
4282b8fc9f30SRené van Dorst return phylink_ethtool_nway_reset(mac->phylink);
4283656e7052SJohn Crispin }
4284656e7052SJohn Crispin
4285656e7052SJohn Crispin static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4286656e7052SJohn Crispin {
4287656e7052SJohn Crispin int i;
4288656e7052SJohn Crispin
4289656e7052SJohn Crispin switch (stringset) {
429084b9cd38SLorenzo Bianconi case ETH_SS_STATS: {
429184b9cd38SLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
429284b9cd38SLorenzo Bianconi
4293656e7052SJohn Crispin for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4294656e7052SJohn Crispin memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4295656e7052SJohn Crispin data += ETH_GSTRING_LEN;
4296656e7052SJohn Crispin }
429784b9cd38SLorenzo Bianconi if (mtk_page_pool_enabled(mac->hw))
429884b9cd38SLorenzo Bianconi page_pool_ethtool_stats_get_strings(data);
429984b9cd38SLorenzo Bianconi break;
430084b9cd38SLorenzo Bianconi }
430184b9cd38SLorenzo Bianconi default:
4302656e7052SJohn Crispin break;
4303656e7052SJohn Crispin }
4304656e7052SJohn Crispin }
4305656e7052SJohn Crispin
4306656e7052SJohn Crispin static int mtk_get_sset_count(struct net_device *dev, int sset)
4307656e7052SJohn Crispin {
4308656e7052SJohn Crispin switch (sset) {
430984b9cd38SLorenzo Bianconi case ETH_SS_STATS: {
431084b9cd38SLorenzo Bianconi int count = ARRAY_SIZE(mtk_ethtool_stats);
431184b9cd38SLorenzo Bianconi struct mtk_mac *mac = netdev_priv(dev);
431284b9cd38SLorenzo Bianconi
431384b9cd38SLorenzo Bianconi if (mtk_page_pool_enabled(mac->hw))
431484b9cd38SLorenzo Bianconi count += page_pool_ethtool_stats_get_count();
431584b9cd38SLorenzo Bianconi return count;
431684b9cd38SLorenzo Bianconi }
4317656e7052SJohn Crispin default:
4318656e7052SJohn Crispin return -EOPNOTSUPP;
4319656e7052SJohn Crispin }
4320656e7052SJohn Crispin }
4321656e7052SJohn Crispin
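/* Aggregate page_pool statistics from every RX ring that owns a page pool
 * and append them to the ethtool statistics array.
 */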
432284b9cd38SLorenzo Bianconi static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
432384b9cd38SLorenzo Bianconi {
432484b9cd38SLorenzo Bianconi struct page_pool_stats stats = {};
432584b9cd38SLorenzo Bianconi int i;
432684b9cd38SLorenzo Bianconi
432784b9cd38SLorenzo Bianconi for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
432884b9cd38SLorenzo Bianconi struct mtk_rx_ring *ring = ð->rx_ring[i];
432984b9cd38SLorenzo Bianconi
433084b9cd38SLorenzo Bianconi if (!ring->page_pool)
433184b9cd38SLorenzo Bianconi continue;
433284b9cd38SLorenzo Bianconi
433384b9cd38SLorenzo Bianconi page_pool_get_stats(ring->page_pool, &stats);
433484b9cd38SLorenzo Bianconi }
433584b9cd38SLorenzo Bianconi page_pool_ethtool_stats_get(data, &stats);
433684b9cd38SLorenzo Bianconi }
433784b9cd38SLorenzo Bianconi
4338656e7052SJohn Crispin static void mtk_get_ethtool_stats(struct net_device *dev,
4339656e7052SJohn Crispin struct ethtool_stats *stats, u64 *data)
4340656e7052SJohn Crispin {
4341656e7052SJohn Crispin struct mtk_mac *mac = netdev_priv(dev);
4342656e7052SJohn Crispin struct mtk_hw_stats *hwstats = mac->hw_stats;
4343656e7052SJohn Crispin u64 *data_src, *data_dst;
4344656e7052SJohn Crispin unsigned int start;
4345656e7052SJohn Crispin int i;
4346656e7052SJohn Crispin
4347dce6fa42SSean Wang if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4348dce6fa42SSean Wang return;
4349dce6fa42SSean Wang
4350656e7052SJohn Crispin if (netif_running(dev) && netif_device_present(dev)) {
43518d32e062SSean Wang if (spin_trylock_bh(&hwstats->stats_lock)) {
4352656e7052SJohn Crispin mtk_stats_update_mac(mac);
43538d32e062SSean Wang spin_unlock_bh(&hwstats->stats_lock);
4354656e7052SJohn Crispin }
4355656e7052SJohn Crispin }
4356656e7052SJohn Crispin
4357656e7052SJohn Crispin data_src = (u64 *)hwstats;
435894d308d0SSean Wang
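/* Copy the counters under the u64_stats seqcount; retry the whole
 * snapshot if an update raced with the read.
 */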
435994d308d0SSean Wang do {
4360656e7052SJohn Crispin data_dst = data;
4361068c38adSThomas Gleixner start = u64_stats_fetch_begin(&hwstats->syncp);
4362656e7052SJohn Crispin
4363656e7052SJohn Crispin for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4364656e7052SJohn Crispin *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
436584b9cd38SLorenzo Bianconi if (mtk_page_pool_enabled(mac->hw))
436684b9cd38SLorenzo Bianconi mtk_ethtool_pp_stats(mac->hw, data_dst);
4367068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4368656e7052SJohn Crispin }
4369656e7052SJohn Crispin
43707aab747eSNelson Chang static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
43717aab747eSNelson Chang u32 *rule_locs)
43727aab747eSNelson Chang {
43737aab747eSNelson Chang int ret = -EOPNOTSUPP;
43747aab747eSNelson Chang
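/* Every branch below is gated on NETIF_F_LRO because the RX flow rules
 * are implemented by the hardware LRO IP-address filters.
 */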
43757aab747eSNelson Chang switch (cmd->cmd) {
43767aab747eSNelson Chang case ETHTOOL_GRXRINGS:
43779e4f56f1SSean Wang if (dev->hw_features & NETIF_F_LRO) {
43787aab747eSNelson Chang cmd->data = MTK_MAX_RX_RING_NUM;
43797aab747eSNelson Chang ret = 0;
43807aab747eSNelson Chang }
43817aab747eSNelson Chang break;
43827aab747eSNelson Chang case ETHTOOL_GRXCLSRLCNT:
43839e4f56f1SSean Wang if (dev->hw_features & NETIF_F_LRO) {
43847aab747eSNelson Chang struct mtk_mac *mac = netdev_priv(dev);
43857aab747eSNelson Chang
43867aab747eSNelson Chang cmd->rule_cnt = mac->hwlro_ip_cnt;
43877aab747eSNelson Chang ret = 0;
43887aab747eSNelson Chang }
43897aab747eSNelson Chang break;
43907aab747eSNelson Chang case ETHTOOL_GRXCLSRULE:
43919e4f56f1SSean Wang if (dev->hw_features & NETIF_F_LRO)
43927aab747eSNelson Chang ret = mtk_hwlro_get_fdir_entry(dev, cmd);
43937aab747eSNelson Chang break;
43947aab747eSNelson Chang case ETHTOOL_GRXCLSRLALL:
43959e4f56f1SSean Wang if (dev->hw_features & NETIF_F_LRO)
43967aab747eSNelson Chang ret = mtk_hwlro_get_fdir_all(dev, cmd,
43977aab747eSNelson Chang rule_locs);
43987aab747eSNelson Chang break;
43997aab747eSNelson Chang default:
44007aab747eSNelson Chang break;
44017aab747eSNelson Chang }
44027aab747eSNelson Chang
44037aab747eSNelson Chang return ret;
44047aab747eSNelson Chang }
44057aab747eSNelson Chang
44067aab747eSNelson Chang static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
44077aab747eSNelson Chang {
44087aab747eSNelson Chang int ret = -EOPNOTSUPP;
44097aab747eSNelson Chang
44107aab747eSNelson Chang switch (cmd->cmd) {
44117aab747eSNelson Chang case ETHTOOL_SRXCLSRLINS:
44129e4f56f1SSean Wang if (dev->hw_features & NETIF_F_LRO)
44137aab747eSNelson Chang ret = mtk_hwlro_add_ipaddr(dev, cmd);
44147aab747eSNelson Chang break;
44157aab747eSNelson Chang case ETHTOOL_SRXCLSRLDEL:
44169e4f56f1SSean Wang if (dev->hw_features & NETIF_F_LRO)
44177aab747eSNelson Chang ret = mtk_hwlro_del_ipaddr(dev, cmd);
44187aab747eSNelson Chang break;
44197aab747eSNelson Chang default:
44207aab747eSNelson Chang break;
44217aab747eSNelson Chang }
44227aab747eSNelson Chang
44237aab747eSNelson Chang return ret;
44247aab747eSNelson Chang }
44257aab747eSNelson Chang
4426f63959c7SFelix Fietkau static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4427f63959c7SFelix Fietkau struct net_device *sb_dev)
4428f63959c7SFelix Fietkau {
4429f63959c7SFelix Fietkau struct mtk_mac *mac = netdev_priv(dev);
4430f63959c7SFelix Fietkau unsigned int queue = 0;
4431f63959c7SFelix Fietkau
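/* DSA user ports get their own TX queues starting at index 3, while
 * regular traffic uses the per-GMAC queue (mac->id); anything out of
 * range falls back to queue 0.
 */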
4432f63959c7SFelix Fietkau if (netdev_uses_dsa(dev))
4433f63959c7SFelix Fietkau queue = skb_get_queue_mapping(skb) + 3;
4434f63959c7SFelix Fietkau else
4435f63959c7SFelix Fietkau queue = mac->id;
4436f63959c7SFelix Fietkau
4437f63959c7SFelix Fietkau if (queue >= dev->num_tx_queues)
4438f63959c7SFelix Fietkau queue = 0;
4439f63959c7SFelix Fietkau
4440f63959c7SFelix Fietkau return queue;
4441f63959c7SFelix Fietkau }
4442f63959c7SFelix Fietkau
44436a38cb15SJulia Lawall static const struct ethtool_ops mtk_ethtool_ops = {
44443e60b748SSean Wang .get_link_ksettings = mtk_get_link_ksettings,
44453e60b748SSean Wang .set_link_ksettings = mtk_set_link_ksettings,
4446656e7052SJohn Crispin .get_drvinfo = mtk_get_drvinfo,
4447656e7052SJohn Crispin .get_msglevel = mtk_get_msglevel,
4448656e7052SJohn Crispin .set_msglevel = mtk_set_msglevel,
4449656e7052SJohn Crispin .nway_reset = mtk_nway_reset,
4450b8fc9f30SRené van Dorst .get_link = ethtool_op_get_link,
4451656e7052SJohn Crispin .get_strings = mtk_get_strings,
4452656e7052SJohn Crispin .get_sset_count = mtk_get_sset_count,
4453656e7052SJohn Crispin .get_ethtool_stats = mtk_get_ethtool_stats,
44547aab747eSNelson Chang .get_rxnfc = mtk_get_rxnfc,
44557aab747eSNelson Chang .set_rxnfc = mtk_set_rxnfc,
4456656e7052SJohn Crispin };
4457656e7052SJohn Crispin
4458656e7052SJohn Crispin static const struct net_device_ops mtk_netdev_ops = {
4459656e7052SJohn Crispin .ndo_uninit = mtk_uninit,
4460656e7052SJohn Crispin .ndo_open = mtk_open,
4461656e7052SJohn Crispin .ndo_stop = mtk_stop,
4462656e7052SJohn Crispin .ndo_start_xmit = mtk_start_xmit,
4463656e7052SJohn Crispin .ndo_set_mac_address = mtk_set_mac_address,
4464656e7052SJohn Crispin .ndo_validate_addr = eth_validate_addr,
4465a7605370SArnd Bergmann .ndo_eth_ioctl = mtk_do_ioctl,
44664fd59792SDENG Qingfang .ndo_change_mtu = mtk_change_mtu,
4467656e7052SJohn Crispin .ndo_tx_timeout = mtk_tx_timeout,
4468656e7052SJohn Crispin .ndo_get_stats64 = mtk_get_stats64,
44697aab747eSNelson Chang .ndo_fix_features = mtk_fix_features,
44707aab747eSNelson Chang .ndo_set_features = mtk_set_features,
4471656e7052SJohn Crispin #ifdef CONFIG_NET_POLL_CONTROLLER
4472656e7052SJohn Crispin .ndo_poll_controller = mtk_poll_controller,
4473656e7052SJohn Crispin #endif
4474502e84e2SFelix Fietkau .ndo_setup_tc = mtk_eth_setup_tc,
44757c26c20dSLorenzo Bianconi .ndo_bpf = mtk_xdp,
44765886d26fSLorenzo Bianconi .ndo_xdp_xmit = mtk_xdp_xmit,
4477f63959c7SFelix Fietkau .ndo_select_queue = mtk_select_queue,
4478656e7052SJohn Crispin };
4479656e7052SJohn Crispin
4480656e7052SJohn Crispin static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4481656e7052SJohn Crispin {
4482656e7052SJohn Crispin const __be32 *_id = of_get_property(np, "reg", NULL);
44830c65b2b9SAndrew Lunn phy_interface_t phy_mode;
4484b8fc9f30SRené van Dorst struct phylink *phylink;
4485b8fc9f30SRené van Dorst struct mtk_mac *mac;
44860c65b2b9SAndrew Lunn int id, err;
4487f63959c7SFelix Fietkau int txqs = 1;
44887910898eSRussell King (Oracle) u32 val;
4489656e7052SJohn Crispin
4490656e7052SJohn Crispin if (!_id) {
4491656e7052SJohn Crispin dev_err(eth->dev, "missing mac id\n");
4492656e7052SJohn Crispin return -EINVAL;
4493656e7052SJohn Crispin }
4494656e7052SJohn Crispin
4495656e7052SJohn Crispin id = be32_to_cpup(_id);
4496e05fd627SLorenzo Bianconi if (id >= MTK_MAX_DEVS) {
4497656e7052SJohn Crispin dev_err(eth->dev, "%d is not a valid mac id\n", id);
4498656e7052SJohn Crispin return -EINVAL;
4499656e7052SJohn Crispin }
4500656e7052SJohn Crispin
4501656e7052SJohn Crispin if (eth->netdev[id]) {
4502656e7052SJohn Crispin dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4503656e7052SJohn Crispin return -EINVAL;
4504656e7052SJohn Crispin }
4505656e7052SJohn Crispin
4506f63959c7SFelix Fietkau if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4507f63959c7SFelix Fietkau txqs = MTK_QDMA_NUM_QUEUES;
4508f63959c7SFelix Fietkau
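/* Allocate one TX queue per QDMA hardware queue (a single queue on
 * PDMA-only SoCs) and a single RX queue for this MAC.
 */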
4509f63959c7SFelix Fietkau eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4510656e7052SJohn Crispin if (!eth->netdev[id]) {
4511656e7052SJohn Crispin dev_err(eth->dev, "alloc_etherdev failed\n");
4512656e7052SJohn Crispin return -ENOMEM;
4513656e7052SJohn Crispin }
4514656e7052SJohn Crispin mac = netdev_priv(eth->netdev[id]);
4515656e7052SJohn Crispin eth->mac[id] = mac;
4516656e7052SJohn Crispin mac->id = id;
4517656e7052SJohn Crispin mac->hw = eth;
4518656e7052SJohn Crispin mac->of_node = np;
4519656e7052SJohn Crispin
45201d6d537dSDaniel Golle err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
45211d6d537dSDaniel Golle if (err == -EPROBE_DEFER)
45221d6d537dSDaniel Golle return err;
45231d6d537dSDaniel Golle
45241d6d537dSDaniel Golle if (err) {
45251d6d537dSDaniel Golle /* If the mac address is invalid, use random mac address */
45261d6d537dSDaniel Golle eth_hw_addr_random(eth->netdev[id]);
45271d6d537dSDaniel Golle dev_err(eth->dev, "generated random MAC address %pM\n",
45281d6d537dSDaniel Golle eth->netdev[id]->dev_addr);
45291d6d537dSDaniel Golle }
45301d6d537dSDaniel Golle
4531ee406810SNelson Chang memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4532ee406810SNelson Chang mac->hwlro_ip_cnt = 0;
4533ee406810SNelson Chang
4534656e7052SJohn Crispin mac->hw_stats = devm_kzalloc(eth->dev,
4535656e7052SJohn Crispin sizeof(*mac->hw_stats),
4536656e7052SJohn Crispin GFP_KERNEL);
4537656e7052SJohn Crispin if (!mac->hw_stats) {
4538656e7052SJohn Crispin dev_err(eth->dev, "failed to allocate counter memory\n");
4539656e7052SJohn Crispin err = -ENOMEM;
4540656e7052SJohn Crispin goto free_netdev;
4541656e7052SJohn Crispin }
4542656e7052SJohn Crispin spin_lock_init(&mac->hw_stats->stats_lock);
4543d7005652Ssean.wang@mediatek.com u64_stats_init(&mac->hw_stats->syncp);
45441953f134SLorenzo Bianconi
45451953f134SLorenzo Bianconi if (mtk_is_netsys_v3_or_greater(eth))
45461953f134SLorenzo Bianconi mac->hw_stats->reg_offset = id * 0x80;
45471953f134SLorenzo Bianconi else
45481953f134SLorenzo Bianconi mac->hw_stats->reg_offset = id * 0x40;
4549656e7052SJohn Crispin
4550b8fc9f30SRené van Dorst /* phylink create */
45510c65b2b9SAndrew Lunn err = of_get_phy_mode(np, &phy_mode);
45520c65b2b9SAndrew Lunn if (err) {
4553b8fc9f30SRené van Dorst dev_err(eth->dev, "incorrect phy-mode\n");
4554b8fc9f30SRené van Dorst goto free_netdev;
4555b8fc9f30SRené van Dorst }
4556b8fc9f30SRené van Dorst
4557b8fc9f30SRené van Dorst /* mac config is not set */
4558b8fc9f30SRené van Dorst mac->interface = PHY_INTERFACE_MODE_NA;
4559b8fc9f30SRené van Dorst mac->speed = SPEED_UNKNOWN;
4560b8fc9f30SRené van Dorst
4561b8fc9f30SRené van Dorst mac->phylink_config.dev = &eth->netdev[id]->dev;
4562b8fc9f30SRené van Dorst mac->phylink_config.type = PHYLINK_NETDEV;
4563a4238f6cSRussell King (Oracle) mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4564a4238f6cSRussell King (Oracle) MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4565a4238f6cSRussell King (Oracle)
456676a4cb75SRussell King (Oracle) /* MT7623 gmac0 is now missing its speed-specific PLL configuration
456776a4cb75SRussell King (Oracle) * in its .mac_config method (since state->speed is not valid there).
456876a4cb75SRussell King (Oracle) * Disable support for MII, GMII and RGMII.
456976a4cb75SRussell King (Oracle) */
457076a4cb75SRussell King (Oracle) if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
457183800d29SRussell King (Oracle) __set_bit(PHY_INTERFACE_MODE_MII,
457283800d29SRussell King (Oracle) mac->phylink_config.supported_interfaces);
457383800d29SRussell King (Oracle) __set_bit(PHY_INTERFACE_MODE_GMII,
457483800d29SRussell King (Oracle) mac->phylink_config.supported_interfaces);
457583800d29SRussell King (Oracle)
457683800d29SRussell King (Oracle) if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
457783800d29SRussell King (Oracle) phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
457876a4cb75SRussell King (Oracle) }
457983800d29SRussell King (Oracle)
458083800d29SRussell King (Oracle) if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
458183800d29SRussell King (Oracle) __set_bit(PHY_INTERFACE_MODE_TRGMII,
458283800d29SRussell King (Oracle) mac->phylink_config.supported_interfaces);
458383800d29SRussell King (Oracle)
45847910898eSRussell King (Oracle) /* TRGMII is not permitted on MT7621 if using DDR2 */
45857910898eSRussell King (Oracle) if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
45867910898eSRussell King (Oracle) MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
45877910898eSRussell King (Oracle) regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
45887910898eSRussell King (Oracle) if (val & SYSCFG_DRAM_TYPE_DDR2)
45897910898eSRussell King (Oracle) __clear_bit(PHY_INTERFACE_MODE_TRGMII,
45907910898eSRussell King (Oracle) mac->phylink_config.supported_interfaces);
45917910898eSRussell King (Oracle) }
45927910898eSRussell King (Oracle)
459383800d29SRussell King (Oracle) if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
459483800d29SRussell King (Oracle) __set_bit(PHY_INTERFACE_MODE_SGMII,
459583800d29SRussell King (Oracle) mac->phylink_config.supported_interfaces);
459683800d29SRussell King (Oracle) __set_bit(PHY_INTERFACE_MODE_1000BASEX,
459783800d29SRussell King (Oracle) mac->phylink_config.supported_interfaces);
459883800d29SRussell King (Oracle) __set_bit(PHY_INTERFACE_MODE_2500BASEX,
459983800d29SRussell King (Oracle) mac->phylink_config.supported_interfaces);
460083800d29SRussell King (Oracle) }
4601b8fc9f30SRené van Dorst
4602445eb644SLorenzo Bianconi if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4603445eb644SLorenzo Bianconi MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4604445eb644SLorenzo Bianconi id == MTK_GMAC1_ID) {
4605445eb644SLorenzo Bianconi mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4606445eb644SLorenzo Bianconi MAC_SYM_PAUSE |
4607445eb644SLorenzo Bianconi MAC_10000FD;
4608445eb644SLorenzo Bianconi phy_interface_zero(mac->phylink_config.supported_interfaces);
4609445eb644SLorenzo Bianconi __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4610445eb644SLorenzo Bianconi mac->phylink_config.supported_interfaces);
4611445eb644SLorenzo Bianconi }
4612445eb644SLorenzo Bianconi
4613b8fc9f30SRené van Dorst phylink = phylink_create(&mac->phylink_config,
4614b8fc9f30SRené van Dorst of_fwnode_handle(mac->of_node),
4615b8fc9f30SRené van Dorst phy_mode, &mtk_phylink_ops);
4616b8fc9f30SRené van Dorst if (IS_ERR(phylink)) {
4617b8fc9f30SRené van Dorst err = PTR_ERR(phylink);
4618b8fc9f30SRené van Dorst goto free_netdev;
4619b8fc9f30SRené van Dorst }
4620b8fc9f30SRené van Dorst
4621b8fc9f30SRené van Dorst mac->phylink = phylink;
4622b8fc9f30SRené van Dorst
4623656e7052SJohn Crispin SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4624eaadf9fdSJohn Crispin eth->netdev[id]->watchdog_timeo = 5 * HZ;
4625656e7052SJohn Crispin eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4626656e7052SJohn Crispin eth->netdev[id]->base_addr = (unsigned long)eth->base;
4627ee406810SNelson Chang
4628296c9120SStefan Roese eth->netdev[id]->hw_features = eth->soc->hw_features;
4629ee406810SNelson Chang if (eth->hwlro)
4630ee406810SNelson Chang eth->netdev[id]->hw_features |= NETIF_F_LRO;
4631ee406810SNelson Chang
4632296c9120SStefan Roese eth->netdev[id]->vlan_features = eth->soc->hw_features &
4633c6d96df9SFelix Fietkau ~NETIF_F_HW_VLAN_CTAG_TX;
4634296c9120SStefan Roese eth->netdev[id]->features |= eth->soc->hw_features;
4635656e7052SJohn Crispin eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4636656e7052SJohn Crispin
463780673029SJohn Crispin eth->netdev[id]->irq = eth->irq[0];
46383174b3b5SSean Wang eth->netdev[id]->dev.of_node = np;
46393174b3b5SSean Wang
46404fd59792SDENG Qingfang if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4641555a8933SLanden Chao eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
46424fd59792SDENG Qingfang else
46434fd59792SDENG Qingfang eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4644555a8933SLanden Chao
4645f63959c7SFelix Fietkau if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4646f63959c7SFelix Fietkau mac->device_notifier.notifier_call = mtk_device_event;
4647f63959c7SFelix Fietkau register_netdevice_notifier(&mac->device_notifier);
4648f63959c7SFelix Fietkau }
4649f63959c7SFelix Fietkau
465066c0e13aSMarek Majtyka if (mtk_page_pool_enabled(eth))
465166c0e13aSMarek Majtyka eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
465266c0e13aSMarek Majtyka NETDEV_XDP_ACT_REDIRECT |
465366c0e13aSMarek Majtyka NETDEV_XDP_ACT_NDO_XMIT |
465466c0e13aSMarek Majtyka NETDEV_XDP_ACT_NDO_XMIT_SG;
465566c0e13aSMarek Majtyka
4656656e7052SJohn Crispin return 0;
4657656e7052SJohn Crispin
4658656e7052SJohn Crispin free_netdev:
4659656e7052SJohn Crispin free_netdev(eth->netdev[id]);
4660656e7052SJohn Crispin return err;
4661656e7052SJohn Crispin }
4662656e7052SJohn Crispin
4663d776a57eSFelix Fietkau void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4664d776a57eSFelix Fietkau {
4665d776a57eSFelix Fietkau struct net_device *dev, *tmp;
4666d776a57eSFelix Fietkau LIST_HEAD(dev_list);
4667d776a57eSFelix Fietkau int i;
4668d776a57eSFelix Fietkau
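/* Close every running netdev, switch the device used for DMA mappings,
 * then reopen them so their DMA resources are set up against the new
 * device.
 */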
4669d776a57eSFelix Fietkau rtnl_lock();
4670d776a57eSFelix Fietkau
4671e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
4672d776a57eSFelix Fietkau dev = eth->netdev[i];
4673d776a57eSFelix Fietkau
4674d776a57eSFelix Fietkau if (!dev || !(dev->flags & IFF_UP))
4675d776a57eSFelix Fietkau continue;
4676d776a57eSFelix Fietkau
4677d776a57eSFelix Fietkau list_add_tail(&dev->close_list, &dev_list);
4678d776a57eSFelix Fietkau }
4679d776a57eSFelix Fietkau
4680d776a57eSFelix Fietkau dev_close_many(&dev_list, false);
4681d776a57eSFelix Fietkau
4682d776a57eSFelix Fietkau eth->dma_dev = dma_dev;
4683d776a57eSFelix Fietkau
4684d776a57eSFelix Fietkau list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4685d776a57eSFelix Fietkau list_del_init(&dev->close_list);
4686d776a57eSFelix Fietkau dev_open(dev, NULL);
4687d776a57eSFelix Fietkau }
4688d776a57eSFelix Fietkau
4689d776a57eSFelix Fietkau rtnl_unlock();
4690d776a57eSFelix Fietkau }
4691d776a57eSFelix Fietkau
46922a3ec7aeSDaniel Golle static int mtk_sgmii_init(struct mtk_eth *eth)
46932a3ec7aeSDaniel Golle {
46942a3ec7aeSDaniel Golle struct device_node *np;
46952a3ec7aeSDaniel Golle struct regmap *regmap;
46962a3ec7aeSDaniel Golle u32 flags;
46972a3ec7aeSDaniel Golle int i;
46982a3ec7aeSDaniel Golle
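/* Create one LynxI PCS per "mediatek,sgmiisys" phandle; stop at the
 * first missing entry.
 */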
46992a3ec7aeSDaniel Golle for (i = 0; i < MTK_MAX_DEVS; i++) {
47002a3ec7aeSDaniel Golle np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
47012a3ec7aeSDaniel Golle if (!np)
47022a3ec7aeSDaniel Golle break;
47032a3ec7aeSDaniel Golle
47042a3ec7aeSDaniel Golle regmap = syscon_node_to_regmap(np);
47052a3ec7aeSDaniel Golle flags = 0;
47062a3ec7aeSDaniel Golle if (of_property_read_bool(np, "mediatek,pnswap"))
47072a3ec7aeSDaniel Golle flags |= MTK_SGMII_FLAG_PN_SWAP;
47082a3ec7aeSDaniel Golle
47092a3ec7aeSDaniel Golle of_node_put(np);
47102a3ec7aeSDaniel Golle
47112a3ec7aeSDaniel Golle if (IS_ERR(regmap))
47122a3ec7aeSDaniel Golle return PTR_ERR(regmap);
47132a3ec7aeSDaniel Golle
47142a3ec7aeSDaniel Golle eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
47152a3ec7aeSDaniel Golle eth->soc->ana_rgc3,
47162a3ec7aeSDaniel Golle flags);
47172a3ec7aeSDaniel Golle }
47182a3ec7aeSDaniel Golle
47192a3ec7aeSDaniel Golle return 0;
47202a3ec7aeSDaniel Golle }
47212a3ec7aeSDaniel Golle
4722656e7052SJohn Crispin static int mtk_probe(struct platform_device *pdev)
4723656e7052SJohn Crispin {
4724ebb1e4f9SDaniel Golle struct resource *res = NULL, *res_sram;
4725656e7052SJohn Crispin struct device_node *mac_np;
4726656e7052SJohn Crispin struct mtk_eth *eth;
4727b8fc9f30SRené van Dorst int err, i;
4728656e7052SJohn Crispin
4729656e7052SJohn Crispin eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4730656e7052SJohn Crispin if (!eth)
4731656e7052SJohn Crispin return -ENOMEM;
4732656e7052SJohn Crispin
4733eda7d46dSRyder Lee eth->soc = of_device_get_match_data(&pdev->dev);
47342ec50f57SSean Wang
4735549e5495SSean Wang eth->dev = &pdev->dev;
4736d776a57eSFelix Fietkau eth->dma_dev = &pdev->dev;
4737566495deSYueHaibing eth->base = devm_platform_ioremap_resource(pdev, 0);
4738621e49f6SVladimir Zapolskiy if (IS_ERR(eth->base))
4739621e49f6SVladimir Zapolskiy return PTR_ERR(eth->base);
4740656e7052SJohn Crispin
4741160d3a9bSLorenzo Bianconi if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4742296c9120SStefan Roese eth->ip_align = NET_IP_ALIGN;
4743296c9120SStefan Roese
4744ebb1e4f9SDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4745ebb1e4f9SDaniel Golle /* SRAM is actual memory and supports transparent access just like DRAM.
4746ebb1e4f9SDaniel Golle * Hence we don't require __iomem being set and don't need to use accessor
4747ebb1e4f9SDaniel Golle * functions to read from or write to SRAM.
4748ebb1e4f9SDaniel Golle */
4749ebb1e4f9SDaniel Golle if (mtk_is_netsys_v3_or_greater(eth)) {
4750ebb1e4f9SDaniel Golle eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4751ebb1e4f9SDaniel Golle if (IS_ERR(eth->sram_base))
4752ebb1e4f9SDaniel Golle return PTR_ERR(eth->sram_base);
4753ebb1e4f9SDaniel Golle } else {
4754ebb1e4f9SDaniel Golle eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4755ebb1e4f9SDaniel Golle }
4756ebb1e4f9SDaniel Golle }
4757ebb1e4f9SDaniel Golle
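/* SoCs with MTK_36BIT_DMA use 36-bit streaming DMA addressing, while
 * coherent allocations are kept below 4 GiB.
 */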
47582d75891eSDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
475901b4ae7eSDaniel Golle err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
476001b4ae7eSDaniel Golle if (!err)
476101b4ae7eSDaniel Golle err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
476201b4ae7eSDaniel Golle
47632d75891eSDaniel Golle if (err) {
47642d75891eSDaniel Golle dev_err(&pdev->dev, "Wrong DMA config\n");
47652d75891eSDaniel Golle return -EINVAL;
47662d75891eSDaniel Golle }
47672d75891eSDaniel Golle }
47682d75891eSDaniel Golle
4769656e7052SJohn Crispin spin_lock_init(&eth->page_lock);
47705cce0322SJohn Crispin spin_lock_init(&eth->tx_irq_lock);
47715cce0322SJohn Crispin spin_lock_init(&eth->rx_irq_lock);
4772e9229ffdSFelix Fietkau spin_lock_init(&eth->dim_lock);
4773e9229ffdSFelix Fietkau
4774e9229ffdSFelix Fietkau eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4775e9229ffdSFelix Fietkau INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
477693b2591aSLorenzo Bianconi INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4777e9229ffdSFelix Fietkau
4778e9229ffdSFelix Fietkau eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4779e9229ffdSFelix Fietkau INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4780656e7052SJohn Crispin
4781296c9120SStefan Roese if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4782656e7052SJohn Crispin eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4783656e7052SJohn Crispin "mediatek,ethsys");
4784656e7052SJohn Crispin if (IS_ERR(eth->ethsys)) {
4785656e7052SJohn Crispin dev_err(&pdev->dev, "no ethsys regmap found\n");
4786656e7052SJohn Crispin return PTR_ERR(eth->ethsys);
4787656e7052SJohn Crispin }
4788296c9120SStefan Roese }
4789656e7052SJohn Crispin
47907093f9d8SSean Wang if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
47917093f9d8SSean Wang eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
47927093f9d8SSean Wang "mediatek,infracfg");
47937093f9d8SSean Wang if (IS_ERR(eth->infra)) {
47947093f9d8SSean Wang dev_err(&pdev->dev, "no infracfg regmap found\n");
47957093f9d8SSean Wang return PTR_ERR(eth->infra);
47967093f9d8SSean Wang }
47977093f9d8SSean Wang }
47987093f9d8SSean Wang
4799d776a57eSFelix Fietkau if (of_dma_is_coherent(pdev->dev.of_node)) {
4800d776a57eSFelix Fietkau struct regmap *cci;
4801d776a57eSFelix Fietkau
4802d776a57eSFelix Fietkau cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
48034263f77aSLorenzo Bianconi "cci-control-port");
4804d776a57eSFelix Fietkau /* enable CPU/bus coherency */
4805d776a57eSFelix Fietkau if (!IS_ERR(cci))
4806d776a57eSFelix Fietkau regmap_write(cci, 0, 3);
4807d776a57eSFelix Fietkau }
4808d776a57eSFelix Fietkau
480942c03844SSean Wang if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
48102a3ec7aeSDaniel Golle err = mtk_sgmii_init(eth);
48119ffee4a8SSean Wang
48129ffee4a8SSean Wang if (err)
48139ffee4a8SSean Wang return err;
481442c03844SSean Wang }
481542c03844SSean Wang
4816243dc5fbSSean Wang if (eth->soc->required_pctl) {
4817656e7052SJohn Crispin eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4818656e7052SJohn Crispin "mediatek,pctl");
4819656e7052SJohn Crispin if (IS_ERR(eth->pctl)) {
4820656e7052SJohn Crispin dev_err(&pdev->dev, "no pctl regmap found\n");
48212a3ec7aeSDaniel Golle err = PTR_ERR(eth->pctl);
48222a3ec7aeSDaniel Golle goto err_destroy_sgmii;
4823656e7052SJohn Crispin }
4824243dc5fbSSean Wang }
4825656e7052SJohn Crispin
4826a008e2a8SLorenzo Bianconi if (mtk_is_netsys_v2_or_greater(eth)) {
4827de84a090SLorenzo Bianconi res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
48282a3ec7aeSDaniel Golle if (!res) {
48292a3ec7aeSDaniel Golle err = -EINVAL;
48302a3ec7aeSDaniel Golle goto err_destroy_sgmii;
48312a3ec7aeSDaniel Golle }
4832ebb1e4f9SDaniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4833ebb1e4f9SDaniel Golle if (mtk_is_netsys_v3_or_greater(eth)) {
4834ebb1e4f9SDaniel Golle res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4835ebb1e4f9SDaniel Golle if (!res_sram) {
4836ebb1e4f9SDaniel Golle err = -EINVAL;
4837ebb1e4f9SDaniel Golle goto err_destroy_sgmii;
4838ebb1e4f9SDaniel Golle }
4839ebb1e4f9SDaniel Golle eth->phy_scratch_ring = res_sram->start;
4840ebb1e4f9SDaniel Golle } else {
4841ebb1e4f9SDaniel Golle eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4842ebb1e4f9SDaniel Golle }
4843ebb1e4f9SDaniel Golle }
4844de84a090SLorenzo Bianconi }
4845804775dfSFelix Fietkau
4846de84a090SLorenzo Bianconi if (eth->soc->offload_version) {
4847de84a090SLorenzo Bianconi for (i = 0;; i++) {
4848de84a090SLorenzo Bianconi struct device_node *np;
4849de84a090SLorenzo Bianconi phys_addr_t wdma_phy;
4850de84a090SLorenzo Bianconi u32 wdma_base;
4851de84a090SLorenzo Bianconi
4852de84a090SLorenzo Bianconi if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4853804775dfSFelix Fietkau break;
4854804775dfSFelix Fietkau
4855de84a090SLorenzo Bianconi np = of_parse_phandle(pdev->dev.of_node,
4856de84a090SLorenzo Bianconi "mediatek,wed", i);
4857de84a090SLorenzo Bianconi if (!np)
4858de84a090SLorenzo Bianconi break;
4859de84a090SLorenzo Bianconi
4860de84a090SLorenzo Bianconi wdma_base = eth->soc->reg_map->wdma_base[i];
4861de84a090SLorenzo Bianconi wdma_phy = res ? res->start + wdma_base : 0;
4862de84a090SLorenzo Bianconi mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4863de84a090SLorenzo Bianconi wdma_phy, i);
4864de84a090SLorenzo Bianconi }
4865804775dfSFelix Fietkau }
4866804775dfSFelix Fietkau
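/* Up to three interrupt lines are used; SoCs with MTK_SHARED_INT expose
 * a single line that is reused for all sources.
 */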
486780673029SJohn Crispin for (i = 0; i < 3; i++) {
4868889bcbdeSBjørn Mork if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4869889bcbdeSBjørn Mork eth->irq[i] = eth->irq[0];
4870889bcbdeSBjørn Mork else
487180673029SJohn Crispin eth->irq[i] = platform_get_irq(pdev, i);
487280673029SJohn Crispin if (eth->irq[i] < 0) {
487380673029SJohn Crispin dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4874b3d0d981SYang Yingliang err = -ENXIO;
4875b3d0d981SYang Yingliang goto err_wed_exit;
4876656e7052SJohn Crispin }
487780673029SJohn Crispin }
4878549e5495SSean Wang for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4879549e5495SSean Wang eth->clks[i] = devm_clk_get(eth->dev,
4880549e5495SSean Wang mtk_clks_source_name[i]);
4881549e5495SSean Wang if (IS_ERR(eth->clks[i])) {
4882b3d0d981SYang Yingliang if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4883b3d0d981SYang Yingliang err = -EPROBE_DEFER;
4884b3d0d981SYang Yingliang goto err_wed_exit;
4885b3d0d981SYang Yingliang }
48862ec50f57SSean Wang if (eth->soc->required_clks & BIT(i)) {
48872ec50f57SSean Wang dev_err(&pdev->dev, "clock %s not found\n",
48882ec50f57SSean Wang mtk_clks_source_name[i]);
4889b3d0d981SYang Yingliang err = -EINVAL;
4890b3d0d981SYang Yingliang goto err_wed_exit;
48912ec50f57SSean Wang }
48922ec50f57SSean Wang eth->clks[i] = NULL;
4893549e5495SSean Wang }
4894549e5495SSean Wang }
4895656e7052SJohn Crispin
4896656e7052SJohn Crispin eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
48977c78b4adSJohn Crispin INIT_WORK(&eth->pending_work, mtk_pending_work);
4898656e7052SJohn Crispin
4899a9724b9cSLorenzo Bianconi err = mtk_hw_init(eth, false);
4900656e7052SJohn Crispin if (err)
4901b3d0d981SYang Yingliang goto err_wed_exit;
4902656e7052SJohn Crispin
49032d14ba72SSean Wang eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4904983e1a6cSNelson Chang
4905656e7052SJohn Crispin for_each_child_of_node(pdev->dev.of_node, mac_np) {
4906656e7052SJohn Crispin if (!of_device_is_compatible(mac_np,
4907656e7052SJohn Crispin "mediatek,eth-mac"))
4908656e7052SJohn Crispin continue;
4909656e7052SJohn Crispin
4910656e7052SJohn Crispin if (!of_device_is_available(mac_np))
4911656e7052SJohn Crispin continue;
4912656e7052SJohn Crispin
4913656e7052SJohn Crispin err = mtk_add_mac(eth, mac_np);
4914cf36dd2fSNishka Dasgupta if (err) {
4915cf36dd2fSNishka Dasgupta of_node_put(mac_np);
49168a8a9e89SSean Wang goto err_deinit_hw;
4917656e7052SJohn Crispin }
4918cf36dd2fSNishka Dasgupta }
4919656e7052SJohn Crispin
4920889bcbdeSBjørn Mork if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4921889bcbdeSBjørn Mork err = devm_request_irq(eth->dev, eth->irq[0],
4922889bcbdeSBjørn Mork mtk_handle_irq, 0,
4923889bcbdeSBjørn Mork dev_name(eth->dev), eth);
4924889bcbdeSBjørn Mork } else {
4925889bcbdeSBjørn Mork err = devm_request_irq(eth->dev, eth->irq[1],
4926889bcbdeSBjørn Mork mtk_handle_irq_tx, 0,
492785574dbfSSean Wang dev_name(eth->dev), eth);
492885574dbfSSean Wang if (err)
492985574dbfSSean Wang goto err_free_dev;
493085574dbfSSean Wang
4931889bcbdeSBjørn Mork err = devm_request_irq(eth->dev, eth->irq[2],
4932889bcbdeSBjørn Mork mtk_handle_irq_rx, 0,
493385574dbfSSean Wang dev_name(eth->dev), eth);
4934889bcbdeSBjørn Mork }
493585574dbfSSean Wang if (err)
493685574dbfSSean Wang goto err_free_dev;
493785574dbfSSean Wang
4938296c9120SStefan Roese /* No MT7628/88 support yet */
4939296c9120SStefan Roese if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
494085574dbfSSean Wang err = mtk_mdio_init(eth);
494185574dbfSSean Wang if (err)
494285574dbfSSean Wang goto err_free_dev;
4943296c9120SStefan Roese }
494485574dbfSSean Wang
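/* Flow offload: map and initialise each packet processing engine (PPE)
 * instance, two on NETSYS v2 or later and one otherwise.
 */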
4945ba37b7caSFelix Fietkau if (eth->soc->offload_version) {
4946a008e2a8SLorenzo Bianconi u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
4947329bce51SLorenzo Bianconi
49484ff1a3fcSLorenzo Bianconi num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
49494ff1a3fcSLorenzo Bianconi for (i = 0; i < num_ppe; i++) {
49504ff1a3fcSLorenzo Bianconi u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
49514ff1a3fcSLorenzo Bianconi
49523fbe4d8cSDaniel Golle eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
49533fbe4d8cSDaniel Golle
49544ff1a3fcSLorenzo Bianconi if (!eth->ppe[i]) {
49551ccc723bSFelix Fietkau err = -ENOMEM;
4956603ea5e7SYan Cangang goto err_deinit_ppe;
49571ccc723bSFelix Fietkau }
49584ff1a3fcSLorenzo Bianconi }
4959502e84e2SFelix Fietkau
4960502e84e2SFelix Fietkau err = mtk_eth_offload_init(eth);
4961502e84e2SFelix Fietkau if (err)
4962603ea5e7SYan Cangang goto err_deinit_ppe;
4963ba37b7caSFelix Fietkau }
4964ba37b7caSFelix Fietkau
496585574dbfSSean Wang for (i = 0; i < MTK_MAX_DEVS; i++) {
496685574dbfSSean Wang if (!eth->netdev[i])
496785574dbfSSean Wang continue;
496885574dbfSSean Wang
496985574dbfSSean Wang err = register_netdev(eth->netdev[i]);
497085574dbfSSean Wang if (err) {
497185574dbfSSean Wang dev_err(eth->dev, "error bringing up device\n");
4972603ea5e7SYan Cangang goto err_deinit_ppe;
497385574dbfSSean Wang } else
497485574dbfSSean Wang netif_info(eth, probe, eth->netdev[i],
497585574dbfSSean Wang "mediatek frame engine at 0x%08lx, irq %d\n",
497685574dbfSSean Wang eth->netdev[i]->base_addr, eth->irq[0]);
497785574dbfSSean Wang }
497885574dbfSSean Wang
4979656e7052SJohn Crispin /* we run 2 devices on the same DMA ring so we need a dummy device
4980656e7052SJohn Crispin * for NAPI to work
4981656e7052SJohn Crispin */
4982656e7052SJohn Crispin init_dummy_netdev(&eth->dummy_dev);
4983b48b89f9SJakub Kicinski netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4984b48b89f9SJakub Kicinski netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4985656e7052SJohn Crispin
4986656e7052SJohn Crispin platform_set_drvdata(pdev, eth);
498793b2591aSLorenzo Bianconi schedule_delayed_work(&eth->reset.monitor_work,
498893b2591aSLorenzo Bianconi MTK_DMA_MONITOR_TIMEOUT);
4989656e7052SJohn Crispin
4990656e7052SJohn Crispin return 0;
4991656e7052SJohn Crispin
4992603ea5e7SYan Cangang err_deinit_ppe:
4993603ea5e7SYan Cangang mtk_ppe_deinit(eth);
49948a8a9e89SSean Wang mtk_mdio_cleanup(eth);
4995656e7052SJohn Crispin err_free_dev:
49968a8a9e89SSean Wang mtk_free_dev(eth);
49978a8a9e89SSean Wang err_deinit_hw:
49988a8a9e89SSean Wang mtk_hw_deinit(eth);
4999b3d0d981SYang Yingliang err_wed_exit:
5000b3d0d981SYang Yingliang mtk_wed_exit();
50012a3ec7aeSDaniel Golle err_destroy_sgmii:
50022a3ec7aeSDaniel Golle mtk_sgmii_destroy(eth);
50038a8a9e89SSean Wang
5004656e7052SJohn Crispin return err;
5005656e7052SJohn Crispin }
5006656e7052SJohn Crispin
5007656e7052SJohn Crispin static int mtk_remove(struct platform_device *pdev)
5008656e7052SJohn Crispin {
5009656e7052SJohn Crispin struct mtk_eth *eth = platform_get_drvdata(pdev);
5010b8fc9f30SRené van Dorst struct mtk_mac *mac;
501179e9a414SSean Wang int i;
5012656e7052SJohn Crispin
501379e9a414SSean Wang /* stop all devices to make sure that dma is properly shut down */
5014e05fd627SLorenzo Bianconi for (i = 0; i < MTK_MAX_DEVS; i++) {
501579e9a414SSean Wang if (!eth->netdev[i])
501679e9a414SSean Wang continue;
501779e9a414SSean Wang mtk_stop(eth->netdev[i]);
5018b8fc9f30SRené van Dorst mac = netdev_priv(eth->netdev[i]);
5019b8fc9f30SRené van Dorst phylink_disconnect_phy(mac->phylink);
502079e9a414SSean Wang }
5021656e7052SJohn Crispin
5022b3d0d981SYang Yingliang mtk_wed_exit();
5023bf253fb7SSean Wang mtk_hw_deinit(eth);
5024656e7052SJohn Crispin
502580673029SJohn Crispin netif_napi_del(&eth->tx_napi);
5026656e7052SJohn Crispin netif_napi_del(&eth->rx_napi);
5027656e7052SJohn Crispin mtk_cleanup(eth);
5028e82f7148SSean Wang mtk_mdio_cleanup(eth);
5029656e7052SJohn Crispin
5030656e7052SJohn Crispin return 0;
5031656e7052SJohn Crispin }
5032656e7052SJohn Crispin
50332ec50f57SSean Wang static const struct mtk_soc_data mt2701_data = {
50348cb42714SLorenzo Bianconi .reg_map = &mtk_reg_map,
50357093f9d8SSean Wang .caps = MT7623_CAPS | MTK_HWLRO,
5036296c9120SStefan Roese .hw_features = MTK_HW_FEATURES,
5037243dc5fbSSean Wang .required_clks = MT7623_CLKS_BITMAP,
5038243dc5fbSSean Wang .required_pctl = true,
5039a008e2a8SLorenzo Bianconi .version = 1,
5040eb067347SLorenzo Bianconi .txrx = {
5041eb067347SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma),
5042670ff7daSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma),
5043160d3a9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT,
5044160d3a9bSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID,
5045160d3a9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN,
5046160d3a9bSLorenzo Bianconi .dma_len_offset = 16,
5047eb067347SLorenzo Bianconi },
50482ec50f57SSean Wang };
50492ec50f57SSean Wang
5050889bcbdeSBjørn Mork static const struct mtk_soc_data mt7621_data = {
50518cb42714SLorenzo Bianconi .reg_map = &mtk_reg_map,
50528efaa653SRené van Dorst .caps = MT7621_CAPS,
5053296c9120SStefan Roese .hw_features = MTK_HW_FEATURES,
5054889bcbdeSBjørn Mork .required_clks = MT7621_CLKS_BITMAP,
5055889bcbdeSBjørn Mork .required_pctl = false,
5056a008e2a8SLorenzo Bianconi .version = 1,
505771ba8e48SFelix Fietkau .offload_version = 1,
5058ba2fc48cSLorenzo Bianconi .hash_offset = 2,
5059a5dc694eSLorenzo Bianconi .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5060eb067347SLorenzo Bianconi .txrx = {
5061eb067347SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma),
5062670ff7daSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma),
5063160d3a9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT,
5064160d3a9bSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID,
5065160d3a9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN,
5066160d3a9bSLorenzo Bianconi .dma_len_offset = 16,
5067eb067347SLorenzo Bianconi },
5068889bcbdeSBjørn Mork };
5069889bcbdeSBjørn Mork
507042c03844SSean Wang static const struct mtk_soc_data mt7622_data = {
50718cb42714SLorenzo Bianconi .reg_map = &mtk_reg_map,
50729ffee4a8SSean Wang .ana_rgc3 = 0x2028,
50737093f9d8SSean Wang .caps = MT7622_CAPS | MTK_HWLRO,
5074296c9120SStefan Roese .hw_features = MTK_HW_FEATURES,
5075243dc5fbSSean Wang .required_clks = MT7622_CLKS_BITMAP,
5076243dc5fbSSean Wang .required_pctl = false,
5077a008e2a8SLorenzo Bianconi .version = 1,
5078ba37b7caSFelix Fietkau .offload_version = 2,
5079ba2fc48cSLorenzo Bianconi .hash_offset = 2,
50803fbe4d8cSDaniel Golle .has_accounting = true,
5081a5dc694eSLorenzo Bianconi .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5082eb067347SLorenzo Bianconi .txrx = {
5083eb067347SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma),
5084670ff7daSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma),
5085160d3a9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT,
5086160d3a9bSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID,
5087160d3a9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN,
5088160d3a9bSLorenzo Bianconi .dma_len_offset = 16,
5089eb067347SLorenzo Bianconi },
509042c03844SSean Wang };
509142c03844SSean Wang
50922ec50f57SSean Wang static const struct mtk_soc_data mt7623_data = {
50938cb42714SLorenzo Bianconi .reg_map = &mtk_reg_map,
50947093f9d8SSean Wang .caps = MT7623_CAPS | MTK_HWLRO,
5095296c9120SStefan Roese .hw_features = MTK_HW_FEATURES,
5096243dc5fbSSean Wang .required_clks = MT7623_CLKS_BITMAP,
5097243dc5fbSSean Wang .required_pctl = true,
5098a008e2a8SLorenzo Bianconi .version = 1,
509971ba8e48SFelix Fietkau .offload_version = 1,
5100ba2fc48cSLorenzo Bianconi .hash_offset = 2,
5101a5dc694eSLorenzo Bianconi .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
510276a4cb75SRussell King (Oracle) .disable_pll_modes = true,
5103eb067347SLorenzo Bianconi .txrx = {
5104eb067347SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma),
5105670ff7daSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma),
5106160d3a9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT,
5107160d3a9bSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID,
5108160d3a9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN,
5109160d3a9bSLorenzo Bianconi .dma_len_offset = 16,
5110eb067347SLorenzo Bianconi },
51112ec50f57SSean Wang };
51122ec50f57SSean Wang
5113d438e298SSean Wang static const struct mtk_soc_data mt7629_data = {
51148cb42714SLorenzo Bianconi .reg_map = &mtk_reg_map,
5115d438e298SSean Wang .ana_rgc3 = 0x128,
5116d438e298SSean Wang .caps = MT7629_CAPS | MTK_HWLRO,
5117296c9120SStefan Roese .hw_features = MTK_HW_FEATURES,
5118d438e298SSean Wang .required_clks = MT7629_CLKS_BITMAP,
5119d438e298SSean Wang .required_pctl = false,
51203fbe4d8cSDaniel Golle .has_accounting = true,
5121a008e2a8SLorenzo Bianconi .version = 1,
5122eb067347SLorenzo Bianconi .txrx = {
5123eb067347SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma),
5124670ff7daSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma),
5125160d3a9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT,
5126160d3a9bSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID,
5127160d3a9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN,
5128160d3a9bSLorenzo Bianconi .dma_len_offset = 16,
5129eb067347SLorenzo Bianconi },
5130d438e298SSean Wang };
5131d438e298SSean Wang
5132f5d43dddSDaniel Golle static const struct mtk_soc_data mt7981_data = {
5133f5d43dddSDaniel Golle .reg_map = &mt7986_reg_map,
5134f5d43dddSDaniel Golle .ana_rgc3 = 0x128,
5135f5d43dddSDaniel Golle .caps = MT7981_CAPS,
5136f5d43dddSDaniel Golle .hw_features = MTK_HW_FEATURES,
5137f5d43dddSDaniel Golle .required_clks = MT7981_CLKS_BITMAP,
5138f5d43dddSDaniel Golle .required_pctl = false,
5139a008e2a8SLorenzo Bianconi .version = 2,
5140f5d43dddSDaniel Golle .offload_version = 2,
5141f5d43dddSDaniel Golle .hash_offset = 4,
51423fbe4d8cSDaniel Golle .has_accounting = true,
5143a5dc694eSLorenzo Bianconi .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5144f5d43dddSDaniel Golle .txrx = {
5145f5d43dddSDaniel Golle .txd_size = sizeof(struct mtk_tx_dma_v2),
5146f5d43dddSDaniel Golle .rxd_size = sizeof(struct mtk_rx_dma_v2),
5147f5d43dddSDaniel Golle .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5148f5d43dddSDaniel Golle .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5149f5d43dddSDaniel Golle .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5150f5d43dddSDaniel Golle .dma_len_offset = 8,
5151f5d43dddSDaniel Golle },
5152f5d43dddSDaniel Golle };
5153f5d43dddSDaniel Golle
5154197c9e9bSLorenzo Bianconi static const struct mtk_soc_data mt7986_data = {
5155197c9e9bSLorenzo Bianconi .reg_map = &mt7986_reg_map,
5156197c9e9bSLorenzo Bianconi .ana_rgc3 = 0x128,
5157197c9e9bSLorenzo Bianconi .caps = MT7986_CAPS,
515803a3180eSLorenzo Bianconi .hw_features = MTK_HW_FEATURES,
5159197c9e9bSLorenzo Bianconi .required_clks = MT7986_CLKS_BITMAP,
5160197c9e9bSLorenzo Bianconi .required_pctl = false,
5161a008e2a8SLorenzo Bianconi .version = 2,
5162c9f8d736SLorenzo Bianconi .offload_version = 2,
5163ba2fc48cSLorenzo Bianconi .hash_offset = 4,
51643fbe4d8cSDaniel Golle .has_accounting = true,
5165a5dc694eSLorenzo Bianconi .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5166197c9e9bSLorenzo Bianconi .txrx = {
5167197c9e9bSLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma_v2),
5168197c9e9bSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma_v2),
5169197c9e9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5170da6e113fSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5171197c9e9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5172197c9e9bSLorenzo Bianconi .dma_len_offset = 8,
5173197c9e9bSLorenzo Bianconi },
5174197c9e9bSLorenzo Bianconi };
5175197c9e9bSLorenzo Bianconi
5176445eb644SLorenzo Bianconi static const struct mtk_soc_data mt7988_data = {
5177445eb644SLorenzo Bianconi .reg_map = &mt7988_reg_map,
5178445eb644SLorenzo Bianconi .ana_rgc3 = 0x128,
5179445eb644SLorenzo Bianconi .caps = MT7988_CAPS,
5180445eb644SLorenzo Bianconi .hw_features = MTK_HW_FEATURES,
5181445eb644SLorenzo Bianconi .required_clks = MT7988_CLKS_BITMAP,
5182445eb644SLorenzo Bianconi .required_pctl = false,
5183445eb644SLorenzo Bianconi .version = 3,
518488efedf5SLorenzo Bianconi .offload_version = 2,
518588efedf5SLorenzo Bianconi .hash_offset = 4,
5186571e9c49SDaniel Golle .has_accounting = true,
518788efedf5SLorenzo Bianconi .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5188445eb644SLorenzo Bianconi .txrx = {
5189445eb644SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma_v2),
5190445eb644SLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma_v2),
5191445eb644SLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5192445eb644SLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5193445eb644SLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5194445eb644SLorenzo Bianconi .dma_len_offset = 8,
5195445eb644SLorenzo Bianconi },
5196445eb644SLorenzo Bianconi };
5197445eb644SLorenzo Bianconi
5198296c9120SStefan Roese static const struct mtk_soc_data rt5350_data = {
51998cb42714SLorenzo Bianconi .reg_map = &mt7628_reg_map,
5200296c9120SStefan Roese .caps = MT7628_CAPS,
5201296c9120SStefan Roese .hw_features = MTK_HW_FEATURES_MT7628,
5202296c9120SStefan Roese .required_clks = MT7628_CLKS_BITMAP,
5203296c9120SStefan Roese .required_pctl = false,
5204a008e2a8SLorenzo Bianconi .version = 1,
5205eb067347SLorenzo Bianconi .txrx = {
5206eb067347SLorenzo Bianconi .txd_size = sizeof(struct mtk_tx_dma),
5207670ff7daSLorenzo Bianconi .rxd_size = sizeof(struct mtk_rx_dma),
5208160d3a9bSLorenzo Bianconi .rx_irq_done_mask = MTK_RX_DONE_INT,
5209160d3a9bSLorenzo Bianconi .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5210160d3a9bSLorenzo Bianconi .dma_max_len = MTK_TX_DMA_BUF_LEN,
5211160d3a9bSLorenzo Bianconi .dma_len_offset = 16,
5212eb067347SLorenzo Bianconi },
5213296c9120SStefan Roese };
5214296c9120SStefan Roese
5215656e7052SJohn Crispin const struct of_device_id of_mtk_match[] = {
52162ec50f57SSean Wang { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5217889bcbdeSBjørn Mork { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
521842c03844SSean Wang { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
52192ec50f57SSean Wang { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5220d438e298SSean Wang { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5221f5d43dddSDaniel Golle { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5222197c9e9bSLorenzo Bianconi { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5223445eb644SLorenzo Bianconi { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5224296c9120SStefan Roese { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5225656e7052SJohn Crispin {},
5226656e7052SJohn Crispin };
52277077dc41SSean Wang MODULE_DEVICE_TABLE(of, of_mtk_match);
5228656e7052SJohn Crispin
5229656e7052SJohn Crispin static struct platform_driver mtk_driver = {
5230656e7052SJohn Crispin .probe = mtk_probe,
5231656e7052SJohn Crispin .remove = mtk_remove,
5232656e7052SJohn Crispin .driver = {
5233656e7052SJohn Crispin .name = "mtk_soc_eth",
5234656e7052SJohn Crispin .of_match_table = of_mtk_match,
5235656e7052SJohn Crispin },
5236656e7052SJohn Crispin };
5237656e7052SJohn Crispin
5238656e7052SJohn Crispin module_platform_driver(mtk_driver);
5239656e7052SJohn Crispin
5240656e7052SJohn Crispin MODULE_LICENSE("GPL");
5241656e7052SJohn Crispin MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5242656e7052SJohn Crispin MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5243