1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
57 */
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
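/* A worked example of the alignment above (a sketch, assuming
 * SMP_CACHE_BYTES == 64 on this platform): STMMAC_ALIGN(1500) first rounds
 * up to a cache line, ALIGN(1500, 64) == 1536, then to a 16-byte boundary,
 * ALIGN(1536, 16) == 1536, so a 1500-byte request ends up as a 1536-byte,
 * bus-friendly size.
 */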
63
64 /* Module parameters */
65 #define TX_TIMEO 5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK 256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allows the user to force the use of chain mode instead of ring mode.
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 int ret = 0;
153
154 if (enabled) {
155 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 if (ret)
157 return ret;
158 ret = clk_prepare_enable(priv->plat->pclk);
159 if (ret) {
160 clk_disable_unprepare(priv->plat->stmmac_clk);
161 return ret;
162 }
163 if (priv->plat->clks_config) {
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 if (ret) {
166 clk_disable_unprepare(priv->plat->stmmac_clk);
167 clk_disable_unprepare(priv->plat->pclk);
168 return ret;
169 }
170 }
171 } else {
172 clk_disable_unprepare(priv->plat->stmmac_clk);
173 clk_disable_unprepare(priv->plat->pclk);
174 if (priv->plat->clks_config)
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 }
177
178 return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
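/* Typical usage (a sketch, not taken from a specific caller): gate the bus
 * clocks symmetrically around low-power transitions, e.g. call
 * stmmac_bus_clks_config(priv, false) on the suspend path and
 * stmmac_bus_clks_config(priv, true) on the resume path. On an enable
 * failure the function unwinds the clocks it already prepared, so the
 * caller only needs to check the return value.
 */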
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 buf_sz = DEFAULT_BUFSIZE;
193 if (unlikely(flow_ctrl > 1))
194 flow_ctrl = FLOW_AUTO;
195 else if (likely(flow_ctrl < 0))
196 flow_ctrl = FLOW_OFF;
197 if (unlikely((pause < 0) || (pause > 0xffff)))
198 pause = PAUSE_TIME;
199 if (eee_timer < 0)
200 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 u32 queue;
209
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
212
213 if (stmmac_xdp_is_enabled(priv) &&
214 test_bit(queue, priv->af_xdp_zc_qps)) {
215 napi_disable(&ch->rxtx_napi);
216 continue;
217 }
218
219 if (queue < rx_queues_cnt)
220 napi_disable(&ch->rx_napi);
221 if (queue < tx_queues_cnt)
222 napi_disable(&ch->tx_napi);
223 }
224 }
225
226 /**
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
229 */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 struct stmmac_rx_queue *rx_q;
234 u32 queue;
235
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
240 synchronize_rcu();
241 break;
242 }
243 }
244
245 __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
251 */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 u32 queue;
258
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
261
262 if (stmmac_xdp_is_enabled(priv) &&
263 test_bit(queue, priv->af_xdp_zc_qps)) {
264 napi_enable(&ch->rxtx_napi);
265 continue;
266 }
267
268 if (queue < rx_queues_cnt)
269 napi_enable(&ch->rx_napi);
270 if (queue < tx_queues_cnt)
271 napi_enable(&ch->tx_napi);
272 }
273 }
274
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 netif_carrier_off(priv->dev);
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 stmmac_service_event_schedule(priv);
287 }
288
289 /**
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
293 * clock input.
294 * Note:
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Otherwise, the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
300 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 u32 clk_rate;
304
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307 /* Platform provided default clk_csr would be assumed valid
308 * for all other cases except for the below mentioned ones.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we cannot estimate the proper divider, as the frequency of
311 * clk_csr_i is not known. So we do not change the default
312 * divider.
313 */
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 if (clk_rate < CSR_F_35M)
316 priv->clk_csr = STMMAC_CSR_20_35M;
317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 priv->clk_csr = STMMAC_CSR_35_60M;
319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 priv->clk_csr = STMMAC_CSR_60_100M;
321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 priv->clk_csr = STMMAC_CSR_100_150M;
323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 priv->clk_csr = STMMAC_CSR_150_250M;
325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 priv->clk_csr = STMMAC_CSR_250_300M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
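/* Illustrative example (assuming the platform did not force a clk_csr
 * value): with a 75 MHz csr clock, clk_rate falls in the 60-100 MHz range
 * above, so priv->clk_csr becomes STMMAC_CSR_60_100M and the MDIO block
 * derives MDC from that CSR range selection.
 */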
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
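/* Ring-arithmetic example (hypothetical values): with dma_tx_size == 512,
 * cur_tx == 10 and dirty_tx == 4, the else branch gives
 * 512 - 10 + 4 - 1 == 505 free descriptors; one slot is always kept unused
 * so that cur_tx == dirty_tx unambiguously means the ring is empty.
 */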
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 int tx_lpi_timer;
396
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv->eee_sw_timer_en = en ? 0 : 1;
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404 * stmmac_enable_eee_mode - check and enter in LPI mode
405 * @priv: driver private structure
406 * Description: this function checks whether all TX queues are idle and,
407 * if so, enters LPI mode when EEE is in use.
408 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 u32 tx_cnt = priv->plat->tx_queues_to_use;
412 u32 queue;
413
414 /* check if all TX queues have the work finished */
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418 if (tx_q->dirty_tx != tx_q->cur_tx)
419 return -EBUSY; /* still unfinished work */
420 }
421
422 /* Check and enter in LPI mode */
423 if (!priv->tx_path_in_lpi_mode)
424 stmmac_set_eee_mode(priv, priv->hw,
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 return 0;
427 }
428
429 /**
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function exits and disables EEE when the LPI state
433 * is active. It is called from the xmit path.
434 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 if (!priv->eee_sw_timer_en) {
438 stmmac_lpi_entry_timer_config(priv, 0);
439 return;
440 }
441
442 stmmac_reset_eee_mode(priv, priv->hw);
443 del_timer_sync(&priv->eee_ctrl_timer);
444 priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
450 * Description:
451 * if there is no data transfer and if we are not in LPI state,
452 * then the MAC transmitter can be moved to the LPI state.
453 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458 if (stmmac_enable_eee_mode(priv))
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
465 * Description:
466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enables the LPI state and starts the
468 * related timer.
469 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 int eee_tw_timer = priv->eee_tw_timer;
473
474 /* Using PCS we cannot deal with the PHY registers at this stage,
475 * so we do not support extra features like EEE.
476 */
477 if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 priv->hw->pcs == STMMAC_PCS_RTBI)
479 return false;
480
481 /* Check if MAC core supports the EEE feature. */
482 if (!priv->dma_cap.eee)
483 return false;
484
485 mutex_lock(&priv->lock);
486
487 /* Check if it needs to be deactivated */
488 if (!priv->eee_active) {
489 if (priv->eee_enabled) {
490 netdev_dbg(priv->dev, "disable EEE\n");
491 stmmac_lpi_entry_timer_config(priv, 0);
492 del_timer_sync(&priv->eee_ctrl_timer);
493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 if (priv->hw->xpcs)
495 xpcs_config_eee(priv->hw->xpcs,
496 priv->plat->mult_fact_100ns,
497 false);
498 }
499 mutex_unlock(&priv->lock);
500 return false;
501 }
502
503 if (priv->eee_active && !priv->eee_enabled) {
504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 eee_tw_timer);
507 if (priv->hw->xpcs)
508 xpcs_config_eee(priv->hw->xpcs,
509 priv->plat->mult_fact_100ns,
510 true);
511 }
512
513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 del_timer_sync(&priv->eee_ctrl_timer);
515 priv->tx_path_in_lpi_mode = false;
516 stmmac_lpi_entry_timer_config(priv, 1);
517 } else {
518 stmmac_lpi_entry_timer_config(priv, 0);
519 mod_timer(&priv->eee_ctrl_timer,
520 STMMAC_LPI_T(priv->tx_lpi_timer));
521 }
522
523 mutex_unlock(&priv->lock);
524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 return true;
526 }
527
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529 * @priv: driver private structure
530 * @p : descriptor pointer
531 * @skb : the socket buffer
532 * Description :
533 * This function reads the timestamp from the descriptor, performs some
534 * sanity checks and then passes it to the stack.
535 */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 struct dma_desc *p, struct sk_buff *skb)
538 {
539 struct skb_shared_hwtstamps shhwtstamp;
540 bool found = false;
541 u64 ns = 0;
542
543 if (!priv->hwts_tx_en)
544 return;
545
546 /* exit if skb doesn't support hw tstamp */
547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 return;
549
550 /* check tx tstamp status */
551 if (stmmac_get_tx_timestamp_status(priv, p)) {
552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 found = true;
554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 found = true;
556 }
557
558 if (found) {
559 ns -= priv->plat->cdc_error_adj;
560
561 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp.hwtstamp = ns_to_ktime(ns);
563
564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 /* pass tstamp to stack */
566 skb_tstamp_tx(skb, &shhwtstamp);
567 }
568 }
569
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571 * @priv: driver private structure
572 * @p : descriptor pointer
573 * @np : next descriptor pointer
574 * @skb : the socket buffer
575 * Description :
576 * This function will read received packet's timestamp from the descriptor
577 * and pass it to the stack. It also performs some sanity checks.
578 */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 struct dma_desc *np, struct sk_buff *skb)
581 {
582 struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 struct dma_desc *desc = p;
584 u64 ns = 0;
585
586 if (!priv->hwts_rx_en)
587 return;
588 /* For GMAC4, the valid timestamp is from CTX next desc. */
589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 desc = np;
591
592 /* Check if timestamp is available */
593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595
596 ns -= priv->plat->cdc_error_adj;
597
598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 shhwtstamp = skb_hwtstamps(skb);
600 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 } else {
603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 }
605 }
606
607 /**
608 * stmmac_hwtstamp_set - control hardware timestamping.
609 * @dev: device pointer.
610 * @ifr: An IOCTL specific structure, that can contain a pointer to
611 * a proprietary structure used to pass information to the driver.
612 * Description:
613 * This function configures the MAC to enable/disable both outgoing (TX)
614 * and incoming (RX) packet timestamping based on user input.
615 * Return Value:
616 * 0 on success and an appropriate -ve integer on failure.
617 */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 struct stmmac_priv *priv = netdev_priv(dev);
621 struct hwtstamp_config config;
622 u32 ptp_v2 = 0;
623 u32 tstamp_all = 0;
624 u32 ptp_over_ipv4_udp = 0;
625 u32 ptp_over_ipv6_udp = 0;
626 u32 ptp_over_ethernet = 0;
627 u32 snap_type_sel = 0;
628 u32 ts_master_en = 0;
629 u32 ts_event_en = 0;
630
631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 netdev_alert(priv->dev, "No support for HW time stamping\n");
633 priv->hwts_tx_en = 0;
634 priv->hwts_rx_en = 0;
635
636 return -EOPNOTSUPP;
637 }
638
639 if (copy_from_user(&config, ifr->ifr_data,
640 sizeof(config)))
641 return -EFAULT;
642
643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 __func__, config.flags, config.tx_type, config.rx_filter);
645
646 if (config.tx_type != HWTSTAMP_TX_OFF &&
647 config.tx_type != HWTSTAMP_TX_ON)
648 return -ERANGE;
649
650 if (priv->adv_ts) {
651 switch (config.rx_filter) {
652 case HWTSTAMP_FILTER_NONE:
653 /* time stamp no incoming packet at all */
654 config.rx_filter = HWTSTAMP_FILTER_NONE;
655 break;
656
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 /* PTP v1, UDP, any kind of event packet */
659 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 /* 'xmac' hardware can support Sync, Pdelay_Req and
661 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
662 * This leaves Delay_Req timestamps out.
663 * Enable all events *and* general purpose message
664 * timestamping
665 */
666 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 break;
670
671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 /* PTP v1, UDP, Sync packet */
673 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 /* take time stamp for SYNC messages only */
675 ts_event_en = PTP_TCR_TSEVNTENA;
676
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 break;
680
681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 /* PTP v1, UDP, Delay_req packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 /* take time stamp for Delay_Req messages only */
685 ts_master_en = PTP_TCR_TSMSTRENA;
686 ts_event_en = PTP_TCR_TSEVNTENA;
687
688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 break;
691
692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 /* PTP v2, UDP, any kind of event packet */
694 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 ptp_v2 = PTP_TCR_TSVER2ENA;
696 /* take time stamp for all event messages */
697 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698
699 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 break;
702
703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 /* PTP v2, UDP, Sync packet */
705 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 ptp_v2 = PTP_TCR_TSVER2ENA;
707 /* take time stamp for SYNC messages only */
708 ts_event_en = PTP_TCR_TSEVNTENA;
709
710 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 /* PTP v2, UDP, Delay_req packet */
716 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 break;
725
726 case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 /* PTP v2/802.1AS, any layer, any kind of event packet */
728 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 ptp_v2 = PTP_TCR_TSVER2ENA;
730 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 if (priv->synopsys_id < DWMAC_CORE_4_10)
732 ts_event_en = PTP_TCR_TSEVNTENA;
733 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 ptp_over_ethernet = PTP_TCR_TSIPENA;
736 break;
737
738 case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 /* PTP v2/802.1AS, any layer, Sync packet */
740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 ptp_v2 = PTP_TCR_TSVER2ENA;
742 /* take time stamp for SYNC messages only */
743 ts_event_en = PTP_TCR_TSEVNTENA;
744
745 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 ptp_over_ethernet = PTP_TCR_TSIPENA;
748 break;
749
750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 /* PTP v2/802.1AS, any layer, Delay_req packet */
752 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 ptp_v2 = PTP_TCR_TSVER2ENA;
754 /* take time stamp for Delay_Req messages only */
755 ts_master_en = PTP_TCR_TSMSTRENA;
756 ts_event_en = PTP_TCR_TSEVNTENA;
757
758 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 ptp_over_ethernet = PTP_TCR_TSIPENA;
761 break;
762
763 case HWTSTAMP_FILTER_NTP_ALL:
764 case HWTSTAMP_FILTER_ALL:
765 /* time stamp any incoming packet */
766 config.rx_filter = HWTSTAMP_FILTER_ALL;
767 tstamp_all = PTP_TCR_TSENALL;
768 break;
769
770 default:
771 return -ERANGE;
772 }
773 } else {
774 switch (config.rx_filter) {
775 case HWTSTAMP_FILTER_NONE:
776 config.rx_filter = HWTSTAMP_FILTER_NONE;
777 break;
778 default:
779 /* PTP v1, UDP, any kind of event packet */
780 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 break;
782 }
783 }
784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786
787 priv->systime_flags = STMMAC_HWTS_ACTIVE;
788
789 if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 priv->systime_flags |= tstamp_all | ptp_v2 |
791 ptp_over_ethernet | ptp_over_ipv6_udp |
792 ptp_over_ipv4_udp | ts_event_en |
793 ts_master_en | snap_type_sel;
794 }
795
796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797
798 memcpy(&priv->tstamp_config, &config, sizeof(config));
799
800 return copy_to_user(ifr->ifr_data, &config,
801 sizeof(config)) ? -EFAULT : 0;
802 }
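/* Userspace sketch (an assumption about a typical caller, not part of this
 * driver): the settings above are driven through the standard SIOCSHWTSTAMP
 * ioctl, roughly:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type   = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Here fd is an open socket and "eth0" is a placeholder interface name. On
 * return, cfg.rx_filter reflects what the hardware will actually honour,
 * which may be broader than the requested filter (e.g. on non-adv_ts cores
 * any non-NONE request is widened to HWTSTAMP_FILTER_PTP_V1_L4_EVENT).
 */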
803
804 /**
805 * stmmac_hwtstamp_get - read hardware timestamping.
806 * @dev: device pointer.
807 * @ifr: An IOCTL specific structure, that can contain a pointer to
808 * a proprietary structure used to pass information to the driver.
809 * Description:
810 * This function obtains the current hardware timestamping settings
811 * as requested.
812 */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct hwtstamp_config *config = &priv->tstamp_config;
817
818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 return -EOPNOTSUPP;
820
821 return copy_to_user(ifr->ifr_data, config,
822 sizeof(*config)) ? -EFAULT : 0;
823 }
824
825 /**
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
827 * @priv: driver private structure
828 * @systime_flags: timestamping flags
829 * Description:
830 * Initialize hardware counter for packet timestamping.
831 * This is valid as long as the interface is open and not suspended.
832 * It is rerun after resuming from suspend, in which case the timestamping
833 * flags updated by stmmac_hwtstamp_set() also need to be restored.
834 */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 struct timespec64 now;
839 u32 sec_inc = 0;
840 u64 temp = 0;
841
842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 return -EOPNOTSUPP;
844
845 if (!priv->plat->clk_ptp_rate) {
846 netdev_err(priv->dev, "Invalid PTP clock rate");
847 return -EINVAL;
848 }
849
850 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
851 priv->systime_flags = systime_flags;
852
853 /* program Sub Second Increment reg */
854 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
855 priv->plat->clk_ptp_rate,
856 xmac, &sec_inc);
857 temp = div_u64(1000000000ULL, sec_inc);
858
859 /* Store sub second increment for later use */
860 priv->sub_second_inc = sec_inc;
861
862 /* calculate default added value:
863 * formula is :
864 * addend = (2^32)/freq_div_ratio;
865 * where, freq_div_ratio = 1e9ns/sec_inc
866 */
867 temp = (u64)(temp << 32);
868 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
869 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
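/* Worked example of the addend above (hypothetical rates): if sec_inc is
 * 20 ns the counter must advance at 1e9 / 20 = 50 MHz; with a 100 MHz
 * clk_ptp_rate the addend becomes (50e6 << 32) / 100e6 == 0x80000000, i.e.
 * the 32-bit accumulator overflows (and the sub-second counter is bumped)
 * on every other PTP clock cycle.
 */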
870
871 /* initialize system time */
872 ktime_get_real_ts64(&now);
873
874 /* lower 32 bits of tv_sec are safe until y2106 */
875 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
876
877 return 0;
878 }
879 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
880
881 /**
882 * stmmac_init_ptp - init PTP
883 * @priv: driver private structure
884 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
885 * This is done by looking at the HW cap. register.
886 * This function also registers the ptp driver.
887 */
888 static int stmmac_init_ptp(struct stmmac_priv *priv)
889 {
890 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
891 int ret;
892
893 if (priv->plat->ptp_clk_freq_config)
894 priv->plat->ptp_clk_freq_config(priv);
895
896 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
897 if (ret)
898 return ret;
899
900 priv->adv_ts = 0;
901 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
902 if (xmac && priv->dma_cap.atime_stamp)
903 priv->adv_ts = 1;
904 /* Dwmac 3.x core with extend_desc can support adv_ts */
905 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
906 priv->adv_ts = 1;
907
908 if (priv->dma_cap.time_stamp)
909 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
910
911 if (priv->adv_ts)
912 netdev_info(priv->dev,
913 "IEEE 1588-2008 Advanced Timestamp supported\n");
914
915 priv->hwts_tx_en = 0;
916 priv->hwts_rx_en = 0;
917
918 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
919 stmmac_hwtstamp_correct_latency(priv, priv);
920
921 return 0;
922 }
923
924 static void stmmac_release_ptp(struct stmmac_priv *priv)
925 {
926 clk_disable_unprepare(priv->plat->clk_ptp_ref);
927 stmmac_ptp_unregister(priv);
928 }
929
930 /**
931 * stmmac_mac_flow_ctrl - Configure flow control in all queues
932 * @priv: driver private structure
933 * @duplex: duplex passed to the next function
934 * Description: It is used for configuring the flow control in all queues
935 */
936 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
937 {
938 u32 tx_cnt = priv->plat->tx_queues_to_use;
939
940 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
941 priv->pause, tx_cnt);
942 }
943
944 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
945 phy_interface_t interface)
946 {
947 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
948
949 if (priv->hw->xpcs)
950 return &priv->hw->xpcs->pcs;
951
952 if (priv->hw->lynx_pcs)
953 return priv->hw->lynx_pcs;
954
955 return NULL;
956 }
957
958 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
959 const struct phylink_link_state *state)
960 {
961 /* Nothing to do, xpcs_config() handles everything */
962 }
963
964 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
965 {
966 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
967 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
968 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
969 bool *hs_enable = &fpe_cfg->hs_enable;
970
971 if (is_up && *hs_enable) {
972 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
973 MPACKET_VERIFY);
974 } else {
975 *lo_state = FPE_STATE_OFF;
976 *lp_state = FPE_STATE_OFF;
977 }
978 }
979
980 static void stmmac_mac_link_down(struct phylink_config *config,
981 unsigned int mode, phy_interface_t interface)
982 {
983 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
984
985 stmmac_mac_set(priv, priv->ioaddr, false);
986 priv->eee_active = false;
987 priv->tx_lpi_enabled = false;
988 priv->eee_enabled = stmmac_eee_init(priv);
989 stmmac_set_eee_pls(priv, priv->hw, false);
990
991 if (priv->dma_cap.fpesel)
992 stmmac_fpe_link_state_handle(priv, false);
993 }
994
995 static void stmmac_mac_link_up(struct phylink_config *config,
996 struct phy_device *phy,
997 unsigned int mode, phy_interface_t interface,
998 int speed, int duplex,
999 bool tx_pause, bool rx_pause)
1000 {
1001 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1002 u32 old_ctrl, ctrl;
1003
1004 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1005 priv->plat->serdes_powerup)
1006 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1007
1008 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1009 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1010
1011 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1012 switch (speed) {
1013 case SPEED_10000:
1014 ctrl |= priv->hw->link.xgmii.speed10000;
1015 break;
1016 case SPEED_5000:
1017 ctrl |= priv->hw->link.xgmii.speed5000;
1018 break;
1019 case SPEED_2500:
1020 ctrl |= priv->hw->link.xgmii.speed2500;
1021 break;
1022 default:
1023 return;
1024 }
1025 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1026 switch (speed) {
1027 case SPEED_100000:
1028 ctrl |= priv->hw->link.xlgmii.speed100000;
1029 break;
1030 case SPEED_50000:
1031 ctrl |= priv->hw->link.xlgmii.speed50000;
1032 break;
1033 case SPEED_40000:
1034 ctrl |= priv->hw->link.xlgmii.speed40000;
1035 break;
1036 case SPEED_25000:
1037 ctrl |= priv->hw->link.xlgmii.speed25000;
1038 break;
1039 case SPEED_10000:
1040 ctrl |= priv->hw->link.xgmii.speed10000;
1041 break;
1042 case SPEED_2500:
1043 ctrl |= priv->hw->link.speed2500;
1044 break;
1045 case SPEED_1000:
1046 ctrl |= priv->hw->link.speed1000;
1047 break;
1048 default:
1049 return;
1050 }
1051 } else {
1052 switch (speed) {
1053 case SPEED_2500:
1054 ctrl |= priv->hw->link.speed2500;
1055 break;
1056 case SPEED_1000:
1057 ctrl |= priv->hw->link.speed1000;
1058 break;
1059 case SPEED_100:
1060 ctrl |= priv->hw->link.speed100;
1061 break;
1062 case SPEED_10:
1063 ctrl |= priv->hw->link.speed10;
1064 break;
1065 default:
1066 return;
1067 }
1068 }
1069
1070 priv->speed = speed;
1071
1072 if (priv->plat->fix_mac_speed)
1073 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1074
1075 if (!duplex)
1076 ctrl &= ~priv->hw->link.duplex;
1077 else
1078 ctrl |= priv->hw->link.duplex;
1079
1080 /* Flow Control operation */
1081 if (rx_pause && tx_pause)
1082 priv->flow_ctrl = FLOW_AUTO;
1083 else if (rx_pause && !tx_pause)
1084 priv->flow_ctrl = FLOW_RX;
1085 else if (!rx_pause && tx_pause)
1086 priv->flow_ctrl = FLOW_TX;
1087 else
1088 priv->flow_ctrl = FLOW_OFF;
1089
1090 stmmac_mac_flow_ctrl(priv, duplex);
1091
1092 if (ctrl != old_ctrl)
1093 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1094
1095 stmmac_mac_set(priv, priv->ioaddr, true);
1096 if (phy && priv->dma_cap.eee) {
1097 priv->eee_active =
1098 phy_init_eee(phy, !(priv->plat->flags &
1099 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1100 priv->eee_enabled = stmmac_eee_init(priv);
1101 priv->tx_lpi_enabled = priv->eee_enabled;
1102 stmmac_set_eee_pls(priv, priv->hw, true);
1103 }
1104
1105 if (priv->dma_cap.fpesel)
1106 stmmac_fpe_link_state_handle(priv, true);
1107
1108 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1109 stmmac_hwtstamp_correct_latency(priv, priv);
1110 }
1111
1112 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1113 .mac_select_pcs = stmmac_mac_select_pcs,
1114 .mac_config = stmmac_mac_config,
1115 .mac_link_down = stmmac_mac_link_down,
1116 .mac_link_up = stmmac_mac_link_up,
1117 };
1118
1119 /**
1120 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1121 * @priv: driver private structure
1122 * Description: this is to verify if the HW supports the Physical Coding
1123 * Sublayer (PCS), an interface that can be used when the MAC is
1124 * configured for the TBI, RTBI, or SGMII PHY interface.
1125 */
1126 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1127 {
1128 int interface = priv->plat->mac_interface;
1129
1130 if (priv->dma_cap.pcs) {
1131 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1132 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1133 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1134 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1135 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1136 priv->hw->pcs = STMMAC_PCS_RGMII;
1137 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1138 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1139 priv->hw->pcs = STMMAC_PCS_SGMII;
1140 }
1141 }
1142 }
1143
1144 /**
1145 * stmmac_init_phy - PHY initialization
1146 * @dev: net device structure
1147 * Description: it initializes the driver's PHY state, and attaches the PHY
1148 * to the mac driver.
1149 * Return value:
1150 * 0 on success
1151 */
1152 static int stmmac_init_phy(struct net_device *dev)
1153 {
1154 struct stmmac_priv *priv = netdev_priv(dev);
1155 struct fwnode_handle *phy_fwnode;
1156 struct fwnode_handle *fwnode;
1157 int ret;
1158
1159 if (!phylink_expects_phy(priv->phylink))
1160 return 0;
1161
1162 fwnode = priv->plat->port_node;
1163 if (!fwnode)
1164 fwnode = dev_fwnode(priv->device);
1165
1166 if (fwnode)
1167 phy_fwnode = fwnode_get_phy_node(fwnode);
1168 else
1169 phy_fwnode = NULL;
1170
1171 /* Some DT bindings do not set up the PHY handle. Let's try to
1172 * parse it manually.
1173 */
1174 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1175 int addr = priv->plat->phy_addr;
1176 struct phy_device *phydev;
1177
1178 if (addr < 0) {
1179 netdev_err(priv->dev, "no phy found\n");
1180 return -ENODEV;
1181 }
1182
1183 phydev = mdiobus_get_phy(priv->mii, addr);
1184 if (!phydev) {
1185 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1186 return -ENODEV;
1187 }
1188
1189 ret = phylink_connect_phy(priv->phylink, phydev);
1190 } else {
1191 fwnode_handle_put(phy_fwnode);
1192 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1193 }
1194
1195 if (!priv->plat->pmt) {
1196 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1197
1198 phylink_ethtool_get_wol(priv->phylink, &wol);
1199 device_set_wakeup_capable(priv->device, !!wol.supported);
1200 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1201 }
1202
1203 return ret;
1204 }
1205
1206 static int stmmac_phy_setup(struct stmmac_priv *priv)
1207 {
1208 struct stmmac_mdio_bus_data *mdio_bus_data;
1209 int mode = priv->plat->phy_interface;
1210 struct fwnode_handle *fwnode;
1211 struct phylink *phylink;
1212 int max_speed;
1213
1214 priv->phylink_config.dev = &priv->dev->dev;
1215 priv->phylink_config.type = PHYLINK_NETDEV;
1216 priv->phylink_config.mac_managed_pm = true;
1217
1218 mdio_bus_data = priv->plat->mdio_bus_data;
1219 if (mdio_bus_data)
1220 priv->phylink_config.ovr_an_inband =
1221 mdio_bus_data->xpcs_an_inband;
1222
1223 /* Set the platform/firmware specified interface mode. Note, phylink
1224 * deals with the PHY interface mode, not the MAC interface mode.
1225 */
1226 __set_bit(mode, priv->phylink_config.supported_interfaces);
1227
1228 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1229 if (priv->hw->xpcs)
1230 xpcs_get_interfaces(priv->hw->xpcs,
1231 priv->phylink_config.supported_interfaces);
1232
1233 /* Get the MAC specific capabilities */
1234 stmmac_mac_phylink_get_caps(priv);
1235
1236 priv->phylink_config.mac_capabilities = priv->hw->link.caps;
1237
1238 max_speed = priv->plat->max_speed;
1239 if (max_speed)
1240 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1241
1242 fwnode = priv->plat->port_node;
1243 if (!fwnode)
1244 fwnode = dev_fwnode(priv->device);
1245
1246 phylink = phylink_create(&priv->phylink_config, fwnode,
1247 mode, &stmmac_phylink_mac_ops);
1248 if (IS_ERR(phylink))
1249 return PTR_ERR(phylink);
1250
1251 priv->phylink = phylink;
1252 return 0;
1253 }
1254
1255 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1256 struct stmmac_dma_conf *dma_conf)
1257 {
1258 u32 rx_cnt = priv->plat->rx_queues_to_use;
1259 unsigned int desc_size;
1260 void *head_rx;
1261 u32 queue;
1262
1263 /* Display RX rings */
1264 for (queue = 0; queue < rx_cnt; queue++) {
1265 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1266
1267 pr_info("\tRX Queue %u rings\n", queue);
1268
1269 if (priv->extend_desc) {
1270 head_rx = (void *)rx_q->dma_erx;
1271 desc_size = sizeof(struct dma_extended_desc);
1272 } else {
1273 head_rx = (void *)rx_q->dma_rx;
1274 desc_size = sizeof(struct dma_desc);
1275 }
1276
1277 /* Display RX ring */
1278 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1279 rx_q->dma_rx_phy, desc_size);
1280 }
1281 }
1282
1283 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1284 struct stmmac_dma_conf *dma_conf)
1285 {
1286 u32 tx_cnt = priv->plat->tx_queues_to_use;
1287 unsigned int desc_size;
1288 void *head_tx;
1289 u32 queue;
1290
1291 /* Display TX rings */
1292 for (queue = 0; queue < tx_cnt; queue++) {
1293 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1294
1295 pr_info("\tTX Queue %d rings\n", queue);
1296
1297 if (priv->extend_desc) {
1298 head_tx = (void *)tx_q->dma_etx;
1299 desc_size = sizeof(struct dma_extended_desc);
1300 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1301 head_tx = (void *)tx_q->dma_entx;
1302 desc_size = sizeof(struct dma_edesc);
1303 } else {
1304 head_tx = (void *)tx_q->dma_tx;
1305 desc_size = sizeof(struct dma_desc);
1306 }
1307
1308 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1309 tx_q->dma_tx_phy, desc_size);
1310 }
1311 }
1312
1313 static void stmmac_display_rings(struct stmmac_priv *priv,
1314 struct stmmac_dma_conf *dma_conf)
1315 {
1316 /* Display RX ring */
1317 stmmac_display_rx_rings(priv, dma_conf);
1318
1319 /* Display TX ring */
1320 stmmac_display_tx_rings(priv, dma_conf);
1321 }
1322
1323 static int stmmac_set_bfsize(int mtu, int bufsize)
1324 {
1325 int ret = bufsize;
1326
1327 if (mtu >= BUF_SIZE_8KiB)
1328 ret = BUF_SIZE_16KiB;
1329 else if (mtu >= BUF_SIZE_4KiB)
1330 ret = BUF_SIZE_8KiB;
1331 else if (mtu >= BUF_SIZE_2KiB)
1332 ret = BUF_SIZE_4KiB;
1333 else if (mtu > DEFAULT_BUFSIZE)
1334 ret = BUF_SIZE_2KiB;
1335 else
1336 ret = DEFAULT_BUFSIZE;
1337
1338 return ret;
1339 }
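/* Example mapping (assuming the usual 2 KiB / 4 KiB / 8 KiB / 16 KiB values
 * of the BUF_SIZE_* constants): an MTU of 3000 bytes is below BUF_SIZE_4KiB
 * but at least BUF_SIZE_2KiB, so stmmac_set_bfsize() returns BUF_SIZE_4KiB;
 * a standard 1500-byte MTU falls through to DEFAULT_BUFSIZE (1536).
 */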
1340
1341 /**
1342 * stmmac_clear_rx_descriptors - clear RX descriptors
1343 * @priv: driver private structure
1344 * @dma_conf: structure to take the dma data
1345 * @queue: RX queue index
1346 * Description: this function is called to clear the RX descriptors
1347 * whether basic or extended descriptors are used.
1348 */
1349 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1350 struct stmmac_dma_conf *dma_conf,
1351 u32 queue)
1352 {
1353 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1354 int i;
1355
1356 /* Clear the RX descriptors */
1357 for (i = 0; i < dma_conf->dma_rx_size; i++)
1358 if (priv->extend_desc)
1359 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1360 priv->use_riwt, priv->mode,
1361 (i == dma_conf->dma_rx_size - 1),
1362 dma_conf->dma_buf_sz);
1363 else
1364 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1365 priv->use_riwt, priv->mode,
1366 (i == dma_conf->dma_rx_size - 1),
1367 dma_conf->dma_buf_sz);
1368 }
1369
1370 /**
1371 * stmmac_clear_tx_descriptors - clear tx descriptors
1372 * @priv: driver private structure
1373 * @dma_conf: structure to take the dma data
1374 * @queue: TX queue index.
1375 * Description: this function is called to clear the TX descriptors
1376 * whether basic or extended descriptors are used.
1377 */
1378 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1379 struct stmmac_dma_conf *dma_conf,
1380 u32 queue)
1381 {
1382 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1383 int i;
1384
1385 /* Clear the TX descriptors */
1386 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1387 int last = (i == (dma_conf->dma_tx_size - 1));
1388 struct dma_desc *p;
1389
1390 if (priv->extend_desc)
1391 p = &tx_q->dma_etx[i].basic;
1392 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1393 p = &tx_q->dma_entx[i].basic;
1394 else
1395 p = &tx_q->dma_tx[i];
1396
1397 stmmac_init_tx_desc(priv, p, priv->mode, last);
1398 }
1399 }
1400
1401 /**
1402 * stmmac_clear_descriptors - clear descriptors
1403 * @priv: driver private structure
1404 * @dma_conf: structure to take the dma data
1405 * Description: this function is called to clear the TX and RX descriptors
1406 * whether basic or extended descriptors are used.
1407 */
1408 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1409 struct stmmac_dma_conf *dma_conf)
1410 {
1411 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1412 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1413 u32 queue;
1414
1415 /* Clear the RX descriptors */
1416 for (queue = 0; queue < rx_queue_cnt; queue++)
1417 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1418
1419 /* Clear the TX descriptors */
1420 for (queue = 0; queue < tx_queue_cnt; queue++)
1421 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1422 }
1423
1424 /**
1425 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1426 * @priv: driver private structure
1427 * @dma_conf: structure to take the dma data
1428 * @p: descriptor pointer
1429 * @i: descriptor index
1430 * @flags: gfp flag
1431 * @queue: RX queue index
1432 * Description: this function is called to allocate a receive buffer, perform
1433 * the DMA mapping and init the descriptor.
1434 */
1435 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1436 struct stmmac_dma_conf *dma_conf,
1437 struct dma_desc *p,
1438 int i, gfp_t flags, u32 queue)
1439 {
1440 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1441 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1442 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1443
1444 if (priv->dma_cap.host_dma_width <= 32)
1445 gfp |= GFP_DMA32;
1446
1447 if (!buf->page) {
1448 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1449 if (!buf->page)
1450 return -ENOMEM;
1451 buf->page_offset = stmmac_rx_offset(priv);
1452 }
1453
1454 if (priv->sph && !buf->sec_page) {
1455 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1456 if (!buf->sec_page)
1457 return -ENOMEM;
1458
1459 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1460 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1461 } else {
1462 buf->sec_page = NULL;
1463 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1464 }
1465
1466 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1467
1468 stmmac_set_desc_addr(priv, p, buf->addr);
1469 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1470 stmmac_init_desc3(priv, p);
1471
1472 return 0;
1473 }
1474
1475 /**
1476 * stmmac_free_rx_buffer - free RX dma buffers
1477 * @priv: private structure
1478 * @rx_q: RX queue
1479 * @i: buffer index.
1480 */
1481 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1482 struct stmmac_rx_queue *rx_q,
1483 int i)
1484 {
1485 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1486
1487 if (buf->page)
1488 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1489 buf->page = NULL;
1490
1491 if (buf->sec_page)
1492 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1493 buf->sec_page = NULL;
1494 }
1495
1496 /**
1497 * stmmac_free_tx_buffer - free a TX dma buffer
1498 * @priv: private structure
1499 * @dma_conf: structure to take the dma data
1500 * @queue: TX queue index
1501 * @i: buffer index.
1502 */
1503 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1504 struct stmmac_dma_conf *dma_conf,
1505 u32 queue, int i)
1506 {
1507 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1508
1509 if (tx_q->tx_skbuff_dma[i].buf &&
1510 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1511 if (tx_q->tx_skbuff_dma[i].map_as_page)
1512 dma_unmap_page(priv->device,
1513 tx_q->tx_skbuff_dma[i].buf,
1514 tx_q->tx_skbuff_dma[i].len,
1515 DMA_TO_DEVICE);
1516 else
1517 dma_unmap_single(priv->device,
1518 tx_q->tx_skbuff_dma[i].buf,
1519 tx_q->tx_skbuff_dma[i].len,
1520 DMA_TO_DEVICE);
1521 }
1522
1523 if (tx_q->xdpf[i] &&
1524 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1525 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1526 xdp_return_frame(tx_q->xdpf[i]);
1527 tx_q->xdpf[i] = NULL;
1528 }
1529
1530 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1531 tx_q->xsk_frames_done++;
1532
1533 if (tx_q->tx_skbuff[i] &&
1534 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1535 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1536 tx_q->tx_skbuff[i] = NULL;
1537 }
1538
1539 tx_q->tx_skbuff_dma[i].buf = 0;
1540 tx_q->tx_skbuff_dma[i].map_as_page = false;
1541 }
1542
1543 /**
1544 * dma_free_rx_skbufs - free RX dma buffers
1545 * @priv: private structure
1546 * @dma_conf: structure to take the dma data
1547 * @queue: RX queue index
1548 */
1549 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1550 struct stmmac_dma_conf *dma_conf,
1551 u32 queue)
1552 {
1553 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1554 int i;
1555
1556 for (i = 0; i < dma_conf->dma_rx_size; i++)
1557 stmmac_free_rx_buffer(priv, rx_q, i);
1558 }
1559
1560 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1561 struct stmmac_dma_conf *dma_conf,
1562 u32 queue, gfp_t flags)
1563 {
1564 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1565 int i;
1566
1567 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1568 struct dma_desc *p;
1569 int ret;
1570
1571 if (priv->extend_desc)
1572 p = &((rx_q->dma_erx + i)->basic);
1573 else
1574 p = rx_q->dma_rx + i;
1575
1576 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1577 queue);
1578 if (ret)
1579 return ret;
1580
1581 rx_q->buf_alloc_num++;
1582 }
1583
1584 return 0;
1585 }
1586
1587 /**
1588 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1589 * @priv: private structure
1590 * @dma_conf: structure to take the dma data
1591 * @queue: RX queue index
1592 */
1593 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1594 struct stmmac_dma_conf *dma_conf,
1595 u32 queue)
1596 {
1597 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1598 int i;
1599
1600 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1601 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1602
1603 if (!buf->xdp)
1604 continue;
1605
1606 xsk_buff_free(buf->xdp);
1607 buf->xdp = NULL;
1608 }
1609 }
1610
1611 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1612 struct stmmac_dma_conf *dma_conf,
1613 u32 queue)
1614 {
1615 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1616 int i;
1617
1618 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1619 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1620 * use this macro to make sure no size violations.
1621 */
1622 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1623
1624 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 struct stmmac_rx_buffer *buf;
1626 dma_addr_t dma_addr;
1627 struct dma_desc *p;
1628
1629 if (priv->extend_desc)
1630 p = (struct dma_desc *)(rx_q->dma_erx + i);
1631 else
1632 p = rx_q->dma_rx + i;
1633
1634 buf = &rx_q->buf_pool[i];
1635
1636 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1637 if (!buf->xdp)
1638 return -ENOMEM;
1639
1640 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1641 stmmac_set_desc_addr(priv, p, dma_addr);
1642 rx_q->buf_alloc_num++;
1643 }
1644
1645 return 0;
1646 }
1647
1648 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1649 {
1650 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1651 return NULL;
1652
1653 return xsk_get_pool_from_qid(priv->dev, queue);
1654 }
1655
1656 /**
1657 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1658 * @priv: driver private structure
1659 * @dma_conf: structure to take the dma data
1660 * @queue: RX queue index
1661 * @flags: gfp flag.
1662 * Description: this function initializes the DMA RX descriptors
1663 * and allocates the socket buffers. It supports the chained and ring
1664 * modes.
1665 */
1666 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1667 struct stmmac_dma_conf *dma_conf,
1668 u32 queue, gfp_t flags)
1669 {
1670 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1671 int ret;
1672
1673 netif_dbg(priv, probe, priv->dev,
1674 "(%s) dma_rx_phy=0x%08x\n", __func__,
1675 (u32)rx_q->dma_rx_phy);
1676
1677 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1678
1679 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1680
1681 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1682
1683 if (rx_q->xsk_pool) {
1684 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1685 MEM_TYPE_XSK_BUFF_POOL,
1686 NULL));
1687 netdev_info(priv->dev,
1688 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1689 rx_q->queue_index);
1690 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1691 } else {
1692 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1693 MEM_TYPE_PAGE_POOL,
1694 rx_q->page_pool));
1695 netdev_info(priv->dev,
1696 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1697 rx_q->queue_index);
1698 }
1699
1700 if (rx_q->xsk_pool) {
1701 /* RX XDP ZC buffer pool may not be populated, e.g.
1702 * xdpsock TX-only.
1703 */
1704 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1705 } else {
1706 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1707 if (ret < 0)
1708 return -ENOMEM;
1709 }
1710
1711 /* Setup the chained descriptor addresses */
1712 if (priv->mode == STMMAC_CHAIN_MODE) {
1713 if (priv->extend_desc)
1714 stmmac_mode_init(priv, rx_q->dma_erx,
1715 rx_q->dma_rx_phy,
1716 dma_conf->dma_rx_size, 1);
1717 else
1718 stmmac_mode_init(priv, rx_q->dma_rx,
1719 rx_q->dma_rx_phy,
1720 dma_conf->dma_rx_size, 0);
1721 }
1722
1723 return 0;
1724 }
1725
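/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initializes the RX ring of every queue in use; on failure it
 * frees the buffers of the queues already initialized.
 */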
1726 static int init_dma_rx_desc_rings(struct net_device *dev,
1727 struct stmmac_dma_conf *dma_conf,
1728 gfp_t flags)
1729 {
1730 struct stmmac_priv *priv = netdev_priv(dev);
1731 u32 rx_count = priv->plat->rx_queues_to_use;
1732 int queue;
1733 int ret;
1734
1735 /* RX INITIALIZATION */
1736 netif_dbg(priv, probe, priv->dev,
1737 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1738
1739 for (queue = 0; queue < rx_count; queue++) {
1740 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1741 if (ret)
1742 goto err_init_rx_buffers;
1743 }
1744
1745 return 0;
1746
1747 err_init_rx_buffers:
1748 while (queue >= 0) {
1749 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1750
1751 if (rx_q->xsk_pool)
1752 dma_free_rx_xskbufs(priv, dma_conf, queue);
1753 else
1754 dma_free_rx_skbufs(priv, dma_conf, queue);
1755
1756 rx_q->buf_alloc_num = 0;
1757 rx_q->xsk_pool = NULL;
1758
1759 queue--;
1760 }
1761
1762 return ret;
1763 }
1764
1765 /**
1766 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1767 * @priv: driver private structure
1768 * @dma_conf: structure to take the dma data
1769 * @queue: TX queue index
1770 * Description: this function initializes the DMA TX descriptors
1771 * and allocates the socket buffers. It supports the chained and ring
1772 * modes.
1773 */
1774 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1775 struct stmmac_dma_conf *dma_conf,
1776 u32 queue)
1777 {
1778 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1779 int i;
1780
1781 netif_dbg(priv, probe, priv->dev,
1782 "(%s) dma_tx_phy=0x%08x\n", __func__,
1783 (u32)tx_q->dma_tx_phy);
1784
1785 /* Setup the chained descriptor addresses */
1786 if (priv->mode == STMMAC_CHAIN_MODE) {
1787 if (priv->extend_desc)
1788 stmmac_mode_init(priv, tx_q->dma_etx,
1789 tx_q->dma_tx_phy,
1790 dma_conf->dma_tx_size, 1);
1791 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1792 stmmac_mode_init(priv, tx_q->dma_tx,
1793 tx_q->dma_tx_phy,
1794 dma_conf->dma_tx_size, 0);
1795 }
1796
1797 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1798
1799 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1800 struct dma_desc *p;
1801
1802 if (priv->extend_desc)
1803 p = &((tx_q->dma_etx + i)->basic);
1804 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1805 p = &((tx_q->dma_entx + i)->basic);
1806 else
1807 p = tx_q->dma_tx + i;
1808
1809 stmmac_clear_desc(priv, p);
1810
1811 tx_q->tx_skbuff_dma[i].buf = 0;
1812 tx_q->tx_skbuff_dma[i].map_as_page = false;
1813 tx_q->tx_skbuff_dma[i].len = 0;
1814 tx_q->tx_skbuff_dma[i].last_segment = false;
1815 tx_q->tx_skbuff[i] = NULL;
1816 }
1817
1818 return 0;
1819 }
1820
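/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initializes the TX ring of every queue in use.
 */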
1821 static int init_dma_tx_desc_rings(struct net_device *dev,
1822 struct stmmac_dma_conf *dma_conf)
1823 {
1824 struct stmmac_priv *priv = netdev_priv(dev);
1825 u32 tx_queue_cnt;
1826 u32 queue;
1827
1828 tx_queue_cnt = priv->plat->tx_queues_to_use;
1829
1830 for (queue = 0; queue < tx_queue_cnt; queue++)
1831 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1832
1833 return 0;
1834 }
1835
1836 /**
1837 * init_dma_desc_rings - init the RX/TX descriptor rings
1838 * @dev: net device structure
1839 * @dma_conf: structure to take the dma data
1840 * @flags: gfp flag.
1841 * Description: this function initializes the DMA RX/TX descriptors
1842 * and allocates the socket buffers. It supports the chained and ring
1843 * modes.
1844 */
1845 static int init_dma_desc_rings(struct net_device *dev,
1846 struct stmmac_dma_conf *dma_conf,
1847 gfp_t flags)
1848 {
1849 struct stmmac_priv *priv = netdev_priv(dev);
1850 int ret;
1851
1852 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1853 if (ret)
1854 return ret;
1855
1856 ret = init_dma_tx_desc_rings(dev, dma_conf);
1857
1858 stmmac_clear_descriptors(priv, dma_conf);
1859
1860 if (netif_msg_hw(priv))
1861 stmmac_display_rings(priv, dma_conf);
1862
1863 return ret;
1864 }
1865
1866 /**
1867 * dma_free_tx_skbufs - free TX dma buffers
1868 * @priv: private structure
1869 * @dma_conf: structure to take the dma data
1870 * @queue: TX queue index
1871 */
1872 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1873 struct stmmac_dma_conf *dma_conf,
1874 u32 queue)
1875 {
1876 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1877 int i;
1878
1879 tx_q->xsk_frames_done = 0;
1880
1881 for (i = 0; i < dma_conf->dma_tx_size; i++)
1882 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1883
1884 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1885 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1886 tx_q->xsk_frames_done = 0;
1887 tx_q->xsk_pool = NULL;
1888 }
1889 }
1890
1891 /**
1892 * stmmac_free_tx_skbufs - free TX skb buffers
1893 * @priv: private structure
1894 */
1895 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1896 {
1897 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1898 u32 queue;
1899
1900 for (queue = 0; queue < tx_queue_cnt; queue++)
1901 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1902 }
1903
1904 /**
1905 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1906 * @priv: private structure
1907 * @dma_conf: structure to take the dma data
1908 * @queue: RX queue index
1909 */
1910 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1911 struct stmmac_dma_conf *dma_conf,
1912 u32 queue)
1913 {
1914 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1915
1916 /* Release the DMA RX socket buffers */
1917 if (rx_q->xsk_pool)
1918 dma_free_rx_xskbufs(priv, dma_conf, queue);
1919 else
1920 dma_free_rx_skbufs(priv, dma_conf, queue);
1921
1922 rx_q->buf_alloc_num = 0;
1923 rx_q->xsk_pool = NULL;
1924
1925 /* Free DMA regions of consistent memory previously allocated */
1926 if (!priv->extend_desc)
1927 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1928 sizeof(struct dma_desc),
1929 rx_q->dma_rx, rx_q->dma_rx_phy);
1930 else
1931 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1932 sizeof(struct dma_extended_desc),
1933 rx_q->dma_erx, rx_q->dma_rx_phy);
1934
1935 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1936 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1937
1938 kfree(rx_q->buf_pool);
1939 if (rx_q->page_pool)
1940 page_pool_destroy(rx_q->page_pool);
1941 }
1942
1943 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1944 struct stmmac_dma_conf *dma_conf)
1945 {
1946 u32 rx_count = priv->plat->rx_queues_to_use;
1947 u32 queue;
1948
1949 /* Free RX queue resources */
1950 for (queue = 0; queue < rx_count; queue++)
1951 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1952 }
1953
1954 /**
1955 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1956 * @priv: private structure
1957 * @dma_conf: structure to take the dma data
1958 * @queue: TX queue index
1959 */
1960 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1961 struct stmmac_dma_conf *dma_conf,
1962 u32 queue)
1963 {
1964 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1965 size_t size;
1966 void *addr;
1967
1968 /* Release the DMA TX socket buffers */
1969 dma_free_tx_skbufs(priv, dma_conf, queue);
1970
1971 if (priv->extend_desc) {
1972 size = sizeof(struct dma_extended_desc);
1973 addr = tx_q->dma_etx;
1974 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1975 size = sizeof(struct dma_edesc);
1976 addr = tx_q->dma_entx;
1977 } else {
1978 size = sizeof(struct dma_desc);
1979 addr = tx_q->dma_tx;
1980 }
1981
1982 size *= dma_conf->dma_tx_size;
1983
1984 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1985
1986 kfree(tx_q->tx_skbuff_dma);
1987 kfree(tx_q->tx_skbuff);
1988 }
1989
1990 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1991 struct stmmac_dma_conf *dma_conf)
1992 {
1993 u32 tx_count = priv->plat->tx_queues_to_use;
1994 u32 queue;
1995
1996 /* Free TX queue resources */
1997 for (queue = 0; queue < tx_count; queue++)
1998 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1999 }
2000
2001 /**
2002 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2003 * @priv: private structure
2004 * @dma_conf: structure to take the dma data
2005 * @queue: RX queue index
2006  * Description: according to which descriptor can be used (extended or basic)
2007  * this function allocates the RX resources for the given queue: the page
2008  * pool, the buffer pool and the descriptor ring, so that the zero-copy
2009  * mechanism can be used on reception.
2010 */
2011 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2012 struct stmmac_dma_conf *dma_conf,
2013 u32 queue)
2014 {
2015 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2016 struct stmmac_channel *ch = &priv->channel[queue];
2017 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2018 struct page_pool_params pp_params = { 0 };
2019 unsigned int num_pages;
2020 unsigned int napi_id;
2021 int ret;
2022
2023 rx_q->queue_index = queue;
2024 rx_q->priv_data = priv;
2025
2026 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2027 pp_params.pool_size = dma_conf->dma_rx_size;
2028 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2029 pp_params.order = ilog2(num_pages);
2030 pp_params.nid = dev_to_node(priv->device);
2031 pp_params.dev = priv->device;
2032 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2033 pp_params.offset = stmmac_rx_offset(priv);
2034 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2035
2036 rx_q->page_pool = page_pool_create(&pp_params);
2037 if (IS_ERR(rx_q->page_pool)) {
2038 ret = PTR_ERR(rx_q->page_pool);
2039 rx_q->page_pool = NULL;
2040 return ret;
2041 }
2042
2043 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2044 sizeof(*rx_q->buf_pool),
2045 GFP_KERNEL);
2046 if (!rx_q->buf_pool)
2047 return -ENOMEM;
2048
2049 if (priv->extend_desc) {
2050 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2051 dma_conf->dma_rx_size *
2052 sizeof(struct dma_extended_desc),
2053 &rx_q->dma_rx_phy,
2054 GFP_KERNEL);
2055 if (!rx_q->dma_erx)
2056 return -ENOMEM;
2057
2058 } else {
2059 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2060 dma_conf->dma_rx_size *
2061 sizeof(struct dma_desc),
2062 &rx_q->dma_rx_phy,
2063 GFP_KERNEL);
2064 if (!rx_q->dma_rx)
2065 return -ENOMEM;
2066 }
2067
2068 if (stmmac_xdp_is_enabled(priv) &&
2069 test_bit(queue, priv->af_xdp_zc_qps))
2070 napi_id = ch->rxtx_napi.napi_id;
2071 else
2072 napi_id = ch->rx_napi.napi_id;
2073
2074 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2075 rx_q->queue_index,
2076 napi_id);
2077 if (ret) {
2078 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2079 return -EINVAL;
2080 }
2081
2082 return 0;
2083 }
2084
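/**
 * alloc_dma_rx_desc_resources - alloc RX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the RX resources of every queue in use and frees
 * everything already allocated if one allocation fails.
 */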
2085 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2086 struct stmmac_dma_conf *dma_conf)
2087 {
2088 u32 rx_count = priv->plat->rx_queues_to_use;
2089 u32 queue;
2090 int ret;
2091
2092 /* RX queues buffers and DMA */
2093 for (queue = 0; queue < rx_count; queue++) {
2094 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2095 if (ret)
2096 goto err_dma;
2097 }
2098
2099 return 0;
2100
2101 err_dma:
2102 free_dma_rx_desc_resources(priv, dma_conf);
2103
2104 return ret;
2105 }
2106
2107 /**
2108 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2109 * @priv: private structure
2110 * @dma_conf: structure to take the dma data
2111 * @queue: TX queue index
2112  * Description: according to which descriptor can be used (extended or basic)
2113  * this function allocates the TX resources for the given queue: the
2114  * tx_skbuff/tx_skbuff_dma arrays and the descriptor ring in coherent
2115  * memory.
2116 */
2117 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2118 struct stmmac_dma_conf *dma_conf,
2119 u32 queue)
2120 {
2121 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2122 size_t size;
2123 void *addr;
2124
2125 tx_q->queue_index = queue;
2126 tx_q->priv_data = priv;
2127
2128 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2129 sizeof(*tx_q->tx_skbuff_dma),
2130 GFP_KERNEL);
2131 if (!tx_q->tx_skbuff_dma)
2132 return -ENOMEM;
2133
2134 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2135 sizeof(struct sk_buff *),
2136 GFP_KERNEL);
2137 if (!tx_q->tx_skbuff)
2138 return -ENOMEM;
2139
2140 if (priv->extend_desc)
2141 size = sizeof(struct dma_extended_desc);
2142 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2143 size = sizeof(struct dma_edesc);
2144 else
2145 size = sizeof(struct dma_desc);
2146
2147 size *= dma_conf->dma_tx_size;
2148
2149 addr = dma_alloc_coherent(priv->device, size,
2150 &tx_q->dma_tx_phy, GFP_KERNEL);
2151 if (!addr)
2152 return -ENOMEM;
2153
2154 if (priv->extend_desc)
2155 tx_q->dma_etx = addr;
2156 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2157 tx_q->dma_entx = addr;
2158 else
2159 tx_q->dma_tx = addr;
2160
2161 return 0;
2162 }
2163
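/**
 * alloc_dma_tx_desc_resources - alloc TX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the TX resources of every queue in use and frees
 * everything already allocated if one allocation fails.
 */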
2164 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2165 struct stmmac_dma_conf *dma_conf)
2166 {
2167 u32 tx_count = priv->plat->tx_queues_to_use;
2168 u32 queue;
2169 int ret;
2170
2171 /* TX queues buffers and DMA */
2172 for (queue = 0; queue < tx_count; queue++) {
2173 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2174 if (ret)
2175 goto err_dma;
2176 }
2177
2178 return 0;
2179
2180 err_dma:
2181 free_dma_tx_desc_resources(priv, dma_conf);
2182 return ret;
2183 }
2184
2185 /**
2186 * alloc_dma_desc_resources - alloc TX/RX resources.
2187 * @priv: private structure
2188 * @dma_conf: structure to take the dma data
2189  * Description: according to which descriptor can be used (extended or basic)
2190  * this function allocates the resources for the TX and RX paths. In case of
2191  * reception, for example, it pre-allocates the RX socket buffers in order to
2192  * enable the zero-copy mechanism.
2193 */
2194 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2195 struct stmmac_dma_conf *dma_conf)
2196 {
2197 /* RX Allocation */
2198 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2199
2200 if (ret)
2201 return ret;
2202
2203 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2204
2205 return ret;
2206 }
2207
2208 /**
2209 * free_dma_desc_resources - free dma desc resources
2210 * @priv: private structure
2211 * @dma_conf: structure to take the dma data
2212 */
2213 static void free_dma_desc_resources(struct stmmac_priv *priv,
2214 struct stmmac_dma_conf *dma_conf)
2215 {
2216 /* Release the DMA TX socket buffers */
2217 free_dma_tx_desc_resources(priv, dma_conf);
2218
2219 /* Release the DMA RX socket buffers later
2220 * to ensure all pending XDP_TX buffers are returned.
2221 */
2222 free_dma_rx_desc_resources(priv, dma_conf);
2223 }
2224
2225 /**
2226 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2227 * @priv: driver private structure
2228 * Description: It is used for enabling the rx queues in the MAC
2229 */
2230 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2231 {
2232 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2233 int queue;
2234 u8 mode;
2235
2236 for (queue = 0; queue < rx_queues_count; queue++) {
2237 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2238 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2239 }
2240 }
2241
2242 /**
2243 * stmmac_start_rx_dma - start RX DMA channel
2244 * @priv: driver private structure
2245 * @chan: RX channel index
2246 * Description:
2247 * This starts a RX DMA channel
2248 */
2249 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2250 {
2251 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2252 stmmac_start_rx(priv, priv->ioaddr, chan);
2253 }
2254
2255 /**
2256 * stmmac_start_tx_dma - start TX DMA channel
2257 * @priv: driver private structure
2258 * @chan: TX channel index
2259 * Description:
2260 * This starts a TX DMA channel
2261 */
2262 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2263 {
2264 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2265 stmmac_start_tx(priv, priv->ioaddr, chan);
2266 }
2267
2268 /**
2269 * stmmac_stop_rx_dma - stop RX DMA channel
2270 * @priv: driver private structure
2271 * @chan: RX channel index
2272 * Description:
2273 * This stops a RX DMA channel
2274 */
2275 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2276 {
2277 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2278 stmmac_stop_rx(priv, priv->ioaddr, chan);
2279 }
2280
2281 /**
2282 * stmmac_stop_tx_dma - stop TX DMA channel
2283 * @priv: driver private structure
2284 * @chan: TX channel index
2285 * Description:
2286 * This stops a TX DMA channel
2287 */
2288 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2289 {
2290 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2291 stmmac_stop_tx(priv, priv->ioaddr, chan);
2292 }
2293
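/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: enables the DMA interrupts of every channel, taking the
 * per-channel lock while touching the interrupt enable bits.
 */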
2294 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2295 {
2296 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2297 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2298 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2299 u32 chan;
2300
2301 for (chan = 0; chan < dma_csr_ch; chan++) {
2302 struct stmmac_channel *ch = &priv->channel[chan];
2303 unsigned long flags;
2304
2305 spin_lock_irqsave(&ch->lock, flags);
2306 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2307 spin_unlock_irqrestore(&ch->lock, flags);
2308 }
2309 }
2310
2311 /**
2312 * stmmac_start_all_dma - start all RX and TX DMA channels
2313 * @priv: driver private structure
2314 * Description:
2315 * This starts all the RX and TX DMA channels
2316 */
2317 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2318 {
2319 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2320 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2321 u32 chan = 0;
2322
2323 for (chan = 0; chan < rx_channels_count; chan++)
2324 stmmac_start_rx_dma(priv, chan);
2325
2326 for (chan = 0; chan < tx_channels_count; chan++)
2327 stmmac_start_tx_dma(priv, chan);
2328 }
2329
2330 /**
2331 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2332 * @priv: driver private structure
2333 * Description:
2334 * This stops the RX and TX DMA channels
2335 */
2336 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2337 {
2338 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2339 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2340 u32 chan = 0;
2341
2342 for (chan = 0; chan < rx_channels_count; chan++)
2343 stmmac_stop_rx_dma(priv, chan);
2344
2345 for (chan = 0; chan < tx_channels_count; chan++)
2346 stmmac_stop_tx_dma(priv, chan);
2347 }
2348
2349 /**
2350 * stmmac_dma_operation_mode - HW DMA operation mode
2351 * @priv: driver private structure
2352 * Description: it is used for configuring the DMA operation mode register in
2353 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2354 */
2355 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2356 {
2357 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2358 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2359 int rxfifosz = priv->plat->rx_fifo_size;
2360 int txfifosz = priv->plat->tx_fifo_size;
2361 u32 txmode = 0;
2362 u32 rxmode = 0;
2363 u32 chan = 0;
2364 u8 qmode = 0;
2365
2366 if (rxfifosz == 0)
2367 rxfifosz = priv->dma_cap.rx_fifo_size;
2368 if (txfifosz == 0)
2369 txfifosz = priv->dma_cap.tx_fifo_size;
2370
2371 /* Adjust for real per queue fifo size */
2372 rxfifosz /= rx_channels_count;
2373 txfifosz /= tx_channels_count;
2374
2375 if (priv->plat->force_thresh_dma_mode) {
2376 txmode = tc;
2377 rxmode = tc;
2378 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2379 /*
2380 * In case of GMAC, SF mode can be enabled
2381 * to perform the TX COE in HW. This depends on:
2382 		 * 1) TX COE being actually supported
2383 		 * 2) there being no buggy Jumbo frame support
2384 		 * that requires not inserting the csum in the TDES.
2385 */
2386 txmode = SF_DMA_MODE;
2387 rxmode = SF_DMA_MODE;
2388 priv->xstats.threshold = SF_DMA_MODE;
2389 } else {
2390 txmode = tc;
2391 rxmode = SF_DMA_MODE;
2392 }
2393
2394 /* configure all channels */
2395 for (chan = 0; chan < rx_channels_count; chan++) {
2396 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2397 u32 buf_size;
2398
2399 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2400
2401 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2402 rxfifosz, qmode);
2403
2404 if (rx_q->xsk_pool) {
2405 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2406 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2407 buf_size,
2408 chan);
2409 } else {
2410 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2411 priv->dma_conf.dma_buf_sz,
2412 chan);
2413 }
2414 }
2415
2416 for (chan = 0; chan < tx_channels_count; chan++) {
2417 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2418
2419 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2420 txfifosz, qmode);
2421 }
2422 }
2423
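/**
 * stmmac_xdp_xmit_zc - transmit pending AF_XDP zero-copy descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peeks descriptors from the XSK pool and places them on the
 * TX ring that is shared with the slow path. Returns true when budget is
 * still available and no more XSK TX descriptors are pending.
 */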
2424 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2425 {
2426 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2427 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2428 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2429 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2430 unsigned int entry = tx_q->cur_tx;
2431 struct dma_desc *tx_desc = NULL;
2432 struct xdp_desc xdp_desc;
2433 bool work_done = true;
2434 u32 tx_set_ic_bit = 0;
2435
2436 /* Avoids TX time-out as we are sharing with slow path */
2437 txq_trans_cond_update(nq);
2438
2439 budget = min(budget, stmmac_tx_avail(priv, queue));
2440
2441 while (budget-- > 0) {
2442 dma_addr_t dma_addr;
2443 bool set_ic;
2444
2445 		/* We share the ring with the slow path, so stop XSK TX desc
2446 		 * submission when the available TX ring space is less than the threshold.
2447 */
2448 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2449 !netif_carrier_ok(priv->dev)) {
2450 work_done = false;
2451 break;
2452 }
2453
2454 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2455 break;
2456
2457 if (likely(priv->extend_desc))
2458 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2459 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2460 tx_desc = &tx_q->dma_entx[entry].basic;
2461 else
2462 tx_desc = tx_q->dma_tx + entry;
2463
2464 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2465 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2466
2467 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2468
2469 		/* To return the XDP buffer to the XSK pool, we simply call
2470 * xsk_tx_completed(), so we don't need to fill up
2471 * 'buf' and 'xdpf'.
2472 */
2473 tx_q->tx_skbuff_dma[entry].buf = 0;
2474 tx_q->xdpf[entry] = NULL;
2475
2476 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2477 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2478 tx_q->tx_skbuff_dma[entry].last_segment = true;
2479 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2480
2481 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2482
2483 tx_q->tx_count_frames++;
2484
2485 if (!priv->tx_coal_frames[queue])
2486 set_ic = false;
2487 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2488 set_ic = true;
2489 else
2490 set_ic = false;
2491
2492 if (set_ic) {
2493 tx_q->tx_count_frames = 0;
2494 stmmac_set_tx_ic(priv, tx_desc);
2495 tx_set_ic_bit++;
2496 }
2497
2498 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2499 true, priv->mode, true, true,
2500 xdp_desc.len);
2501
2502 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2503
2504 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2505 entry = tx_q->cur_tx;
2506 }
2507 u64_stats_update_begin(&txq_stats->napi_syncp);
2508 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2509 u64_stats_update_end(&txq_stats->napi_syncp);
2510
2511 if (tx_desc) {
2512 stmmac_flush_tx_descriptors(priv, queue);
2513 xsk_tx_release(pool);
2514 }
2515
2516 	/* Return true if both of the following conditions are met
2517 * a) TX Budget is still available
2518 * b) work_done = true when XSK TX desc peek is empty (no more
2519 * pending XSK TX for transmission)
2520 */
2521 return !!budget && work_done;
2522 }
2523
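/**
 * stmmac_bump_dma_threshold - raise the DMA threshold after a TX failure
 * @priv: driver private structure
 * @chan: channel index
 * Description: bumps the tc threshold by 64 (while tc <= 256) and reprograms
 * the DMA operation mode for the channel, unless the threshold is already
 * set to Store-And-Forward mode.
 */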
2524 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2525 {
2526 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2527 tc += 64;
2528
2529 if (priv->plat->force_thresh_dma_mode)
2530 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2531 else
2532 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2533 chan);
2534
2535 priv->xstats.threshold = tc;
2536 }
2537 }
2538
2539 /**
2540 * stmmac_tx_clean - to manage the transmission completion
2541 * @priv: driver private structure
2542 * @budget: napi budget limiting this functions packet handling
2543 * @queue: TX queue index
2544 * Description: it reclaims the transmit resources after transmission completes.
2545 */
2546 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2547 {
2548 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2549 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2550 unsigned int bytes_compl = 0, pkts_compl = 0;
2551 unsigned int entry, xmits = 0, count = 0;
2552 u32 tx_packets = 0, tx_errors = 0;
2553
2554 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2555
2556 tx_q->xsk_frames_done = 0;
2557
2558 entry = tx_q->dirty_tx;
2559
2560 	/* Try to clean all completed TX frames in one shot */
2561 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2562 struct xdp_frame *xdpf;
2563 struct sk_buff *skb;
2564 struct dma_desc *p;
2565 int status;
2566
2567 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2568 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2569 xdpf = tx_q->xdpf[entry];
2570 skb = NULL;
2571 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2572 xdpf = NULL;
2573 skb = tx_q->tx_skbuff[entry];
2574 } else {
2575 xdpf = NULL;
2576 skb = NULL;
2577 }
2578
2579 if (priv->extend_desc)
2580 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2581 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2582 p = &tx_q->dma_entx[entry].basic;
2583 else
2584 p = tx_q->dma_tx + entry;
2585
2586 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2587 /* Check if the descriptor is owned by the DMA */
2588 if (unlikely(status & tx_dma_own))
2589 break;
2590
2591 count++;
2592
2593 /* Make sure descriptor fields are read after reading
2594 * the own bit.
2595 */
2596 dma_rmb();
2597
2598 /* Just consider the last segment and ...*/
2599 if (likely(!(status & tx_not_ls))) {
2600 /* ... verify the status error condition */
2601 if (unlikely(status & tx_err)) {
2602 tx_errors++;
2603 if (unlikely(status & tx_err_bump_tc))
2604 stmmac_bump_dma_threshold(priv, queue);
2605 } else {
2606 tx_packets++;
2607 }
2608 if (skb)
2609 stmmac_get_tx_hwtstamp(priv, p, skb);
2610 }
2611
2612 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2613 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2614 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2615 dma_unmap_page(priv->device,
2616 tx_q->tx_skbuff_dma[entry].buf,
2617 tx_q->tx_skbuff_dma[entry].len,
2618 DMA_TO_DEVICE);
2619 else
2620 dma_unmap_single(priv->device,
2621 tx_q->tx_skbuff_dma[entry].buf,
2622 tx_q->tx_skbuff_dma[entry].len,
2623 DMA_TO_DEVICE);
2624 tx_q->tx_skbuff_dma[entry].buf = 0;
2625 tx_q->tx_skbuff_dma[entry].len = 0;
2626 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2627 }
2628
2629 stmmac_clean_desc3(priv, tx_q, p);
2630
2631 tx_q->tx_skbuff_dma[entry].last_segment = false;
2632 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2633
2634 if (xdpf &&
2635 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2636 xdp_return_frame_rx_napi(xdpf);
2637 tx_q->xdpf[entry] = NULL;
2638 }
2639
2640 if (xdpf &&
2641 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2642 xdp_return_frame(xdpf);
2643 tx_q->xdpf[entry] = NULL;
2644 }
2645
2646 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2647 tx_q->xsk_frames_done++;
2648
2649 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2650 if (likely(skb)) {
2651 pkts_compl++;
2652 bytes_compl += skb->len;
2653 dev_consume_skb_any(skb);
2654 tx_q->tx_skbuff[entry] = NULL;
2655 }
2656 }
2657
2658 stmmac_release_tx_desc(priv, p, priv->mode);
2659
2660 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2661 }
2662 tx_q->dirty_tx = entry;
2663
2664 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2665 pkts_compl, bytes_compl);
2666
2667 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2668 queue))) &&
2669 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2670
2671 netif_dbg(priv, tx_done, priv->dev,
2672 "%s: restart transmit\n", __func__);
2673 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2674 }
2675
2676 if (tx_q->xsk_pool) {
2677 bool work_done;
2678
2679 if (tx_q->xsk_frames_done)
2680 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2681
2682 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2683 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2684
2685 		/* For XSK TX, we try to send as many frames as possible.
2686 		 * If the XSK work is done (XSK TX desc empty and budget still
2687 		 * available), return "budget - 1" to re-enable the TX IRQ.
2688 * Else, return "budget" to make NAPI continue polling.
2689 */
2690 work_done = stmmac_xdp_xmit_zc(priv, queue,
2691 STMMAC_XSK_TX_BUDGET_MAX);
2692 if (work_done)
2693 xmits = budget - 1;
2694 else
2695 xmits = budget;
2696 }
2697
2698 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2699 priv->eee_sw_timer_en) {
2700 if (stmmac_enable_eee_mode(priv))
2701 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2702 }
2703
2704 /* We still have pending packets, let's call for a new scheduling */
2705 if (tx_q->dirty_tx != tx_q->cur_tx)
2706 stmmac_tx_timer_arm(priv, queue);
2707
2708 u64_stats_update_begin(&txq_stats->napi_syncp);
2709 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2710 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2711 u64_stats_inc(&txq_stats->napi.tx_clean);
2712 u64_stats_update_end(&txq_stats->napi_syncp);
2713
2714 priv->xstats.tx_errors += tx_errors;
2715
2716 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2717
2718 /* Combine decisions from TX clean and XSK TX */
2719 return max(count, xmits);
2720 }
2721
2722 /**
2723 * stmmac_tx_err - to manage the tx error
2724 * @priv: driver private structure
2725 * @chan: channel index
2726 * Description: it cleans the descriptors and restarts the transmission
2727 * in case of transmission errors.
2728 */
2729 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2730 {
2731 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2732
2733 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2734
2735 stmmac_stop_tx_dma(priv, chan);
2736 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2737 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2738 stmmac_reset_tx_queue(priv, chan);
2739 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2740 tx_q->dma_tx_phy, chan);
2741 stmmac_start_tx_dma(priv, chan);
2742
2743 priv->xstats.tx_errors++;
2744 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2745 }
2746
2747 /**
2748 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2749 * @priv: driver private structure
2750 * @txmode: TX operating mode
2751 * @rxmode: RX operating mode
2752 * @chan: channel index
2753  * Description: it is used for configuring the DMA operation mode at
2754  * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2755 * mode.
2756 */
2757 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2758 u32 rxmode, u32 chan)
2759 {
2760 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2761 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2762 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2763 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2764 int rxfifosz = priv->plat->rx_fifo_size;
2765 int txfifosz = priv->plat->tx_fifo_size;
2766
2767 if (rxfifosz == 0)
2768 rxfifosz = priv->dma_cap.rx_fifo_size;
2769 if (txfifosz == 0)
2770 txfifosz = priv->dma_cap.tx_fifo_size;
2771
2772 /* Adjust for real per queue fifo size */
2773 rxfifosz /= rx_channels_count;
2774 txfifosz /= tx_channels_count;
2775
2776 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2777 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2778 }
2779
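/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: reads the safety feature IRQ status and triggers a global
 * error recovery when a fault is reported. Returns true in that case.
 */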
2780 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2781 {
2782 int ret;
2783
2784 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2785 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2786 if (ret && (ret != -EINVAL)) {
2787 stmmac_global_err(priv);
2788 return true;
2789 }
2790
2791 return false;
2792 }
2793
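/**
 * stmmac_napi_check - check DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: DMA direction being serviced (RX, TX or both)
 * Description: reads the DMA interrupt status of the channel and schedules
 * the RX and/or TX NAPI contexts, masking the corresponding DMA interrupt
 * before each schedule. Returns the interrupt status.
 */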
2794 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2795 {
2796 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2797 &priv->xstats, chan, dir);
2798 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2799 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2800 struct stmmac_channel *ch = &priv->channel[chan];
2801 struct napi_struct *rx_napi;
2802 struct napi_struct *tx_napi;
2803 unsigned long flags;
2804
2805 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2806 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2807
2808 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2809 if (napi_schedule_prep(rx_napi)) {
2810 spin_lock_irqsave(&ch->lock, flags);
2811 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2812 spin_unlock_irqrestore(&ch->lock, flags);
2813 __napi_schedule(rx_napi);
2814 }
2815 }
2816
2817 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2818 if (napi_schedule_prep(tx_napi)) {
2819 spin_lock_irqsave(&ch->lock, flags);
2820 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2821 spin_unlock_irqrestore(&ch->lock, flags);
2822 __napi_schedule(tx_napi);
2823 }
2824 }
2825
2826 return status;
2827 }
2828
2829 /**
2830 * stmmac_dma_interrupt - DMA ISR
2831 * @priv: driver private structure
2832 * Description: this is the DMA ISR. It is called by the main ISR.
2833  * It calls the dwmac dma routine and schedules the poll method when there
2834  * is work that can be done.
2835 */
2836 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2837 {
2838 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2839 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2840 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2841 tx_channel_count : rx_channel_count;
2842 u32 chan;
2843 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2844
2845 /* Make sure we never check beyond our status buffer. */
2846 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2847 channels_to_check = ARRAY_SIZE(status);
2848
2849 for (chan = 0; chan < channels_to_check; chan++)
2850 status[chan] = stmmac_napi_check(priv, chan,
2851 DMA_DIR_RXTX);
2852
2853 for (chan = 0; chan < tx_channel_count; chan++) {
2854 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2855 /* Try to bump up the dma threshold on this failure */
2856 stmmac_bump_dma_threshold(priv, chan);
2857 } else if (unlikely(status[chan] == tx_hard_error)) {
2858 stmmac_tx_err(priv, chan);
2859 }
2860 }
2861 }
2862
2863 /**
2864 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2865 * @priv: driver private structure
2866  * Description: this masks the MMC irq since the counters are managed in SW.
2867 */
2868 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2869 {
2870 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2871 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2872
2873 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2874
2875 if (priv->dma_cap.rmon) {
2876 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2877 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2878 } else
2879 netdev_info(priv->dev, "No MAC Management Counters available\n");
2880 }
2881
2882 /**
2883 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2884 * @priv: driver private structure
2885 * Description:
2886  * newer GMAC chip generations have a register that indicates the
2887  * presence of the optional features/functions.
2888  * This can also be used to override the value passed through the
2889  * platform, which is necessary for old MAC10/100 and GMAC chips.
2890 */
2891 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2892 {
2893 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2894 }
2895
2896 /**
2897 * stmmac_check_ether_addr - check if the MAC addr is valid
2898 * @priv: driver private structure
2899 * Description:
2900  * it verifies whether the MAC address is valid; in case it is not, it
2901  * generates a random MAC address
2902 */
2903 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2904 {
2905 u8 addr[ETH_ALEN];
2906
2907 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2908 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2909 if (is_valid_ether_addr(addr))
2910 eth_hw_addr_set(priv->dev, addr);
2911 else
2912 eth_hw_addr_random(priv->dev);
2913 dev_info(priv->device, "device MAC address %pM\n",
2914 priv->dev->dev_addr);
2915 }
2916 }
2917
2918 /**
2919 * stmmac_init_dma_engine - DMA init.
2920 * @priv: driver private structure
2921 * Description:
2922  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2923  * Some DMA parameters can be passed from the platform;
2924  * if they are not passed, a default is kept for the MAC or GMAC.
2925 */
2926 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2927 {
2928 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2929 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2930 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2931 struct stmmac_rx_queue *rx_q;
2932 struct stmmac_tx_queue *tx_q;
2933 u32 chan = 0;
2934 int atds = 0;
2935 int ret = 0;
2936
2937 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2938 dev_err(priv->device, "Invalid DMA configuration\n");
2939 return -EINVAL;
2940 }
2941
2942 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2943 atds = 1;
2944
2945 ret = stmmac_reset(priv, priv->ioaddr);
2946 if (ret) {
2947 dev_err(priv->device, "Failed to reset the dma\n");
2948 return ret;
2949 }
2950
2951 /* DMA Configuration */
2952 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2953
2954 if (priv->plat->axi)
2955 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2956
2957 /* DMA CSR Channel configuration */
2958 for (chan = 0; chan < dma_csr_ch; chan++) {
2959 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2960 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2961 }
2962
2963 /* DMA RX Channel Configuration */
2964 for (chan = 0; chan < rx_channels_count; chan++) {
2965 rx_q = &priv->dma_conf.rx_queue[chan];
2966
2967 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2968 rx_q->dma_rx_phy, chan);
2969
2970 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2971 (rx_q->buf_alloc_num *
2972 sizeof(struct dma_desc));
2973 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2974 rx_q->rx_tail_addr, chan);
2975 }
2976
2977 /* DMA TX Channel Configuration */
2978 for (chan = 0; chan < tx_channels_count; chan++) {
2979 tx_q = &priv->dma_conf.tx_queue[chan];
2980
2981 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2982 tx_q->dma_tx_phy, chan);
2983
2984 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2985 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2986 tx_q->tx_tail_addr, chan);
2987 }
2988
2989 return ret;
2990 }
2991
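/**
 * stmmac_tx_timer_arm - (re)start the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: arms the per-queue TX mitigation hrtimer, unless the
 * coalescing timer for this queue is configured to zero.
 */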
2992 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2993 {
2994 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2995 u32 tx_coal_timer = priv->tx_coal_timer[queue];
2996
2997 if (!tx_coal_timer)
2998 return;
2999
3000 hrtimer_start(&tx_q->txtimer,
3001 STMMAC_COAL_TIMER(tx_coal_timer),
3002 HRTIMER_MODE_REL);
3003 }
3004
3005 /**
3006 * stmmac_tx_timer - mitigation sw timer for tx.
3007 * @t: data pointer
3008 * Description:
3009 * This is the timer handler to directly invoke the stmmac_tx_clean.
3010 */
3011 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3012 {
3013 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3014 struct stmmac_priv *priv = tx_q->priv_data;
3015 struct stmmac_channel *ch;
3016 struct napi_struct *napi;
3017
3018 ch = &priv->channel[tx_q->queue_index];
3019 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3020
3021 if (likely(napi_schedule_prep(napi))) {
3022 unsigned long flags;
3023
3024 spin_lock_irqsave(&ch->lock, flags);
3025 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3026 spin_unlock_irqrestore(&ch->lock, flags);
3027 __napi_schedule(napi);
3028 }
3029
3030 return HRTIMER_NORESTART;
3031 }
3032
3033 /**
3034 * stmmac_init_coalesce - init mitigation options.
3035 * @priv: driver private structure
3036 * Description:
3037  * This initializes the coalesce parameters: i.e. the timer rate,
3038  * the timer handler and the default threshold used for enabling the
3039  * interrupt-on-completion bit.
3040 */
3041 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3042 {
3043 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3044 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3045 u32 chan;
3046
3047 for (chan = 0; chan < tx_channel_count; chan++) {
3048 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3049
3050 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3051 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3052
3053 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3054 tx_q->txtimer.function = stmmac_tx_timer;
3055 }
3056
3057 for (chan = 0; chan < rx_channel_count; chan++)
3058 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3059 }
3060
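/**
 * stmmac_set_rings_length - program the RX/TX ring lengths
 * @priv: driver private structure
 * Description: writes the descriptor ring length of every TX and RX
 * channel into the hardware.
 */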
3061 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3062 {
3063 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3064 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3065 u32 chan;
3066
3067 /* set TX ring length */
3068 for (chan = 0; chan < tx_channels_count; chan++)
3069 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3070 (priv->dma_conf.dma_tx_size - 1), chan);
3071
3072 /* set RX ring length */
3073 for (chan = 0; chan < rx_channels_count; chan++)
3074 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3075 (priv->dma_conf.dma_rx_size - 1), chan);
3076 }
3077
3078 /**
3079 * stmmac_set_tx_queue_weight - Set TX queue weight
3080 * @priv: driver private structure
3081  * Description: It is used for setting the TX queue weights
3082 */
3083 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3084 {
3085 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3086 u32 weight;
3087 u32 queue;
3088
3089 for (queue = 0; queue < tx_queues_count; queue++) {
3090 weight = priv->plat->tx_queues_cfg[queue].weight;
3091 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3092 }
3093 }
3094
3095 /**
3096 * stmmac_configure_cbs - Configure CBS in TX queue
3097 * @priv: driver private structure
3098 * Description: It is used for configuring CBS in AVB TX queues
3099 */
3100 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3101 {
3102 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3103 u32 mode_to_use;
3104 u32 queue;
3105
3106 /* queue 0 is reserved for legacy traffic */
3107 for (queue = 1; queue < tx_queues_count; queue++) {
3108 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3109 if (mode_to_use == MTL_QUEUE_DCB)
3110 continue;
3111
3112 stmmac_config_cbs(priv, priv->hw,
3113 priv->plat->tx_queues_cfg[queue].send_slope,
3114 priv->plat->tx_queues_cfg[queue].idle_slope,
3115 priv->plat->tx_queues_cfg[queue].high_credit,
3116 priv->plat->tx_queues_cfg[queue].low_credit,
3117 queue);
3118 }
3119 }
3120
3121 /**
3122 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3123 * @priv: driver private structure
3124 * Description: It is used for mapping RX queues to RX dma channels
3125 */
3126 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3127 {
3128 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3129 u32 queue;
3130 u32 chan;
3131
3132 for (queue = 0; queue < rx_queues_count; queue++) {
3133 chan = priv->plat->rx_queues_cfg[queue].chan;
3134 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3135 }
3136 }
3137
3138 /**
3139 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3140 * @priv: driver private structure
3141 * Description: It is used for configuring the RX Queue Priority
3142 */
3143 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3144 {
3145 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3146 u32 queue;
3147 u32 prio;
3148
3149 for (queue = 0; queue < rx_queues_count; queue++) {
3150 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3151 continue;
3152
3153 prio = priv->plat->rx_queues_cfg[queue].prio;
3154 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3155 }
3156 }
3157
3158 /**
3159 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3160 * @priv: driver private structure
3161 * Description: It is used for configuring the TX Queue Priority
3162 */
3163 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3164 {
3165 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3166 u32 queue;
3167 u32 prio;
3168
3169 for (queue = 0; queue < tx_queues_count; queue++) {
3170 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3171 continue;
3172
3173 prio = priv->plat->tx_queues_cfg[queue].prio;
3174 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3175 }
3176 }
3177
3178 /**
3179 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3180 * @priv: driver private structure
3181 * Description: It is used for configuring the RX queue routing
3182 */
3183 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3184 {
3185 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3186 u32 queue;
3187 u8 packet;
3188
3189 for (queue = 0; queue < rx_queues_count; queue++) {
3190 /* no specific packet type routing specified for the queue */
3191 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3192 continue;
3193
3194 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3195 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3196 }
3197 }
3198
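/**
 * stmmac_mac_config_rss - configure Receive Side Scaling
 * @priv: driver private structure
 * Description: enables RSS when both the hardware and the platform support
 * it and NETIF_F_RXHASH is set, then programs the RSS configuration.
 */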
3199 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3200 {
3201 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3202 priv->rss.enable = false;
3203 return;
3204 }
3205
3206 if (priv->dev->features & NETIF_F_RXHASH)
3207 priv->rss.enable = true;
3208 else
3209 priv->rss.enable = false;
3210
3211 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3212 priv->plat->rx_queues_to_use);
3213 }
3214
3215 /**
3216 * stmmac_mtl_configuration - Configure MTL
3217 * @priv: driver private structure
3218  * Description: It is used for configuring the MTL
3219 */
3220 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3221 {
3222 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3223 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3224
3225 if (tx_queues_count > 1)
3226 stmmac_set_tx_queue_weight(priv);
3227
3228 /* Configure MTL RX algorithms */
3229 if (rx_queues_count > 1)
3230 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3231 priv->plat->rx_sched_algorithm);
3232
3233 /* Configure MTL TX algorithms */
3234 if (tx_queues_count > 1)
3235 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3236 priv->plat->tx_sched_algorithm);
3237
3238 /* Configure CBS in AVB TX queues */
3239 if (tx_queues_count > 1)
3240 stmmac_configure_cbs(priv);
3241
3242 /* Map RX MTL to DMA channels */
3243 stmmac_rx_queue_dma_chan_map(priv);
3244
3245 /* Enable MAC RX Queues */
3246 stmmac_mac_enable_rx_queues(priv);
3247
3248 /* Set RX priorities */
3249 if (rx_queues_count > 1)
3250 stmmac_mac_config_rx_queues_prio(priv);
3251
3252 /* Set TX priorities */
3253 if (tx_queues_count > 1)
3254 stmmac_mac_config_tx_queues_prio(priv);
3255
3256 /* Set RX routing */
3257 if (rx_queues_count > 1)
3258 stmmac_mac_config_rx_queues_routing(priv);
3259
3260 /* Receive Side Scaling */
3261 if (rx_queues_count > 1)
3262 stmmac_mac_config_rss(priv);
3263 }
3264
3265 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3266 {
3267 if (priv->dma_cap.asp) {
3268 netdev_info(priv->dev, "Enabling Safety Features\n");
3269 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3270 priv->plat->safety_feat_cfg);
3271 } else {
3272 netdev_info(priv->dev, "No Safety Features support found\n");
3273 }
3274 }
3275
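/**
 * stmmac_fpe_start_wq - create the Frame Preemption workqueue
 * @priv: driver private structure
 * Description: clears the FPE task state and creates the single-threaded
 * workqueue used for the FPE handshake. Returns 0 or -ENOMEM on failure.
 */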
3276 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3277 {
3278 char *name;
3279
3280 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3281 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3282
3283 name = priv->wq_name;
3284 sprintf(name, "%s-fpe", priv->dev->name);
3285
3286 priv->fpe_wq = create_singlethread_workqueue(name);
3287 if (!priv->fpe_wq) {
3288 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3289
3290 return -ENOMEM;
3291 }
3292 netdev_info(priv->dev, "FPE workqueue start");
3293
3294 return 0;
3295 }
3296
3297 /**
3298 * stmmac_hw_setup - setup mac in a usable state.
3299 * @dev : pointer to the device structure.
3300 * @ptp_register: register PTP if set
3301 * Description:
3302  * this is the main function to set up the HW in a usable state: the
3303  * dma engine is reset, the core registers are configured (e.g. AXI,
3304  * Checksum features, timers) and the DMA is ready to start receiving
3305  * and transmitting.
3306  * Return value:
3307  * 0 on success and an appropriate negative integer as defined in
3308  * errno.h on failure.
3309 */
3310 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3311 {
3312 struct stmmac_priv *priv = netdev_priv(dev);
3313 u32 rx_cnt = priv->plat->rx_queues_to_use;
3314 u32 tx_cnt = priv->plat->tx_queues_to_use;
3315 bool sph_en;
3316 u32 chan;
3317 int ret;
3318
3319 /* DMA initialization and SW reset */
3320 ret = stmmac_init_dma_engine(priv);
3321 if (ret < 0) {
3322 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3323 __func__);
3324 return ret;
3325 }
3326
3327 /* Copy the MAC addr into the HW */
3328 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3329
3330 /* PS and related bits will be programmed according to the speed */
3331 if (priv->hw->pcs) {
3332 int speed = priv->plat->mac_port_sel_speed;
3333
3334 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3335 (speed == SPEED_1000)) {
3336 priv->hw->ps = speed;
3337 } else {
3338 dev_warn(priv->device, "invalid port speed\n");
3339 priv->hw->ps = 0;
3340 }
3341 }
3342
3343 /* Initialize the MAC Core */
3344 stmmac_core_init(priv, priv->hw, dev);
3345
3346 	/* Initialize MTL */
3347 stmmac_mtl_configuration(priv);
3348
3349 /* Initialize Safety Features */
3350 stmmac_safety_feat_configuration(priv);
3351
3352 ret = stmmac_rx_ipc(priv, priv->hw);
3353 if (!ret) {
3354 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3355 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3356 priv->hw->rx_csum = 0;
3357 }
3358
3359 /* Enable the MAC Rx/Tx */
3360 stmmac_mac_set(priv, priv->ioaddr, true);
3361
3362 /* Set the HW DMA mode and the COE */
3363 stmmac_dma_operation_mode(priv);
3364
3365 stmmac_mmc_setup(priv);
3366
3367 if (ptp_register) {
3368 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3369 if (ret < 0)
3370 netdev_warn(priv->dev,
3371 "failed to enable PTP reference clock: %pe\n",
3372 ERR_PTR(ret));
3373 }
3374
3375 ret = stmmac_init_ptp(priv);
3376 if (ret == -EOPNOTSUPP)
3377 netdev_info(priv->dev, "PTP not supported by HW\n");
3378 else if (ret)
3379 netdev_warn(priv->dev, "PTP init failed\n");
3380 else if (ptp_register)
3381 stmmac_ptp_register(priv);
3382
3383 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3384
3385 /* Convert the timer from msec to usec */
3386 if (!priv->tx_lpi_timer)
3387 priv->tx_lpi_timer = eee_timer * 1000;
3388
3389 if (priv->use_riwt) {
3390 u32 queue;
3391
3392 for (queue = 0; queue < rx_cnt; queue++) {
3393 if (!priv->rx_riwt[queue])
3394 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3395
3396 stmmac_rx_watchdog(priv, priv->ioaddr,
3397 priv->rx_riwt[queue], queue);
3398 }
3399 }
3400
3401 if (priv->hw->pcs)
3402 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3403
3404 /* set TX and RX rings length */
3405 stmmac_set_rings_length(priv);
3406
3407 /* Enable TSO */
3408 if (priv->tso) {
3409 for (chan = 0; chan < tx_cnt; chan++) {
3410 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3411
3412 /* TSO and TBS cannot co-exist */
3413 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3414 continue;
3415
3416 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3417 }
3418 }
3419
3420 /* Enable Split Header */
3421 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3422 for (chan = 0; chan < rx_cnt; chan++)
3423 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3424
3425
3426 /* VLAN Tag Insertion */
3427 if (priv->dma_cap.vlins)
3428 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3429
3430 /* TBS */
3431 for (chan = 0; chan < tx_cnt; chan++) {
3432 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3433 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3434
3435 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3436 }
3437
3438 /* Configure real RX and TX queues */
3439 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3440 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3441
3442 /* Start the ball rolling... */
3443 stmmac_start_all_dma(priv);
3444
3445 if (priv->dma_cap.fpesel) {
3446 stmmac_fpe_start_wq(priv);
3447
3448 if (priv->plat->fpe_cfg->enable)
3449 stmmac_fpe_handshake(priv, true);
3450 }
3451
3452 return 0;
3453 }
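
/* Rough call flow, as seen in __stmmac_open() below: allocate the DMA rings
 * via stmmac_setup_dma_desc(), then call stmmac_hw_setup(dev, true) to program
 * the hardware, and only then request IRQs and enable the queues. Callers such
 * as resume paths are expected to pass ptp_register == false so the PTP clock
 * device is not registered a second time (an assumption based on the flag
 * name, not something enforced here).
 */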
3454
3455 static void stmmac_hw_teardown(struct net_device *dev)
3456 {
3457 struct stmmac_priv *priv = netdev_priv(dev);
3458
3459 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3460 }
3461
3462 static void stmmac_free_irq(struct net_device *dev,
3463 enum request_irq_err irq_err, int irq_idx)
3464 {
3465 struct stmmac_priv *priv = netdev_priv(dev);
3466 int j;
3467
3468 switch (irq_err) {
3469 case REQ_IRQ_ERR_ALL:
3470 irq_idx = priv->plat->tx_queues_to_use;
3471 fallthrough;
3472 case REQ_IRQ_ERR_TX:
3473 for (j = irq_idx - 1; j >= 0; j--) {
3474 if (priv->tx_irq[j] > 0) {
3475 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3476 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3477 }
3478 }
3479 irq_idx = priv->plat->rx_queues_to_use;
3480 fallthrough;
3481 case REQ_IRQ_ERR_RX:
3482 for (j = irq_idx - 1; j >= 0; j--) {
3483 if (priv->rx_irq[j] > 0) {
3484 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3485 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3486 }
3487 }
3488
3489 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3490 free_irq(priv->sfty_ue_irq, dev);
3491 fallthrough;
3492 case REQ_IRQ_ERR_SFTY_UE:
3493 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3494 free_irq(priv->sfty_ce_irq, dev);
3495 fallthrough;
3496 case REQ_IRQ_ERR_SFTY_CE:
3497 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3498 free_irq(priv->lpi_irq, dev);
3499 fallthrough;
3500 case REQ_IRQ_ERR_LPI:
3501 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3502 free_irq(priv->wol_irq, dev);
3503 fallthrough;
3504 case REQ_IRQ_ERR_WOL:
3505 free_irq(dev->irq, dev);
3506 fallthrough;
3507 case REQ_IRQ_ERR_MAC:
3508 case REQ_IRQ_ERR_NO:
3509 /* If MAC IRQ request error, no more IRQ to free */
3510 break;
3511 }
3512 }
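
/* Unwind sketch: the fallthrough cases above free IRQs in reverse order of
 * how stmmac_request_irq_multi_msi() acquires them. Passing REQ_IRQ_ERR_ALL
 * releases everything; passing the value recorded at the failure point frees
 * only what was already requested. For example, a failure on rx_irq[2] is
 * undone with stmmac_free_irq(dev, REQ_IRQ_ERR_RX, 2), which releases
 * rx_irq[1], rx_irq[0] and every earlier line, but not the one that failed.
 */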
3513
3514 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3515 {
3516 struct stmmac_priv *priv = netdev_priv(dev);
3517 enum request_irq_err irq_err;
3518 cpumask_t cpu_mask;
3519 int irq_idx = 0;
3520 char *int_name;
3521 int ret;
3522 int i;
3523
3524 /* For common interrupt */
3525 int_name = priv->int_name_mac;
3526 sprintf(int_name, "%s:%s", dev->name, "mac");
3527 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3528 0, int_name, dev);
3529 if (unlikely(ret < 0)) {
3530 netdev_err(priv->dev,
3531 "%s: alloc mac MSI %d (error: %d)\n",
3532 __func__, dev->irq, ret);
3533 irq_err = REQ_IRQ_ERR_MAC;
3534 goto irq_error;
3535 }
3536
3537 /* Request the Wake IRQ in case a separate line
3538 * is used for WoL
3539 */
3540 priv->wol_irq_disabled = true;
3541 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3542 int_name = priv->int_name_wol;
3543 sprintf(int_name, "%s:%s", dev->name, "wol");
3544 ret = request_irq(priv->wol_irq,
3545 stmmac_mac_interrupt,
3546 0, int_name, dev);
3547 if (unlikely(ret < 0)) {
3548 netdev_err(priv->dev,
3549 "%s: alloc wol MSI %d (error: %d)\n",
3550 __func__, priv->wol_irq, ret);
3551 irq_err = REQ_IRQ_ERR_WOL;
3552 goto irq_error;
3553 }
3554 }
3555
3556 /* Request the LPI IRQ in case a separate line
3557 * is used for LPI
3558 */
3559 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3560 int_name = priv->int_name_lpi;
3561 sprintf(int_name, "%s:%s", dev->name, "lpi");
3562 ret = request_irq(priv->lpi_irq,
3563 stmmac_mac_interrupt,
3564 0, int_name, dev);
3565 if (unlikely(ret < 0)) {
3566 netdev_err(priv->dev,
3567 "%s: alloc lpi MSI %d (error: %d)\n",
3568 __func__, priv->lpi_irq, ret);
3569 irq_err = REQ_IRQ_ERR_LPI;
3570 goto irq_error;
3571 }
3572 }
3573
3574 /* Request the Safety Feature Correctable Error line in
3575 * case a separate line is used
3576 */
3577 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3578 int_name = priv->int_name_sfty_ce;
3579 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3580 ret = request_irq(priv->sfty_ce_irq,
3581 stmmac_safety_interrupt,
3582 0, int_name, dev);
3583 if (unlikely(ret < 0)) {
3584 netdev_err(priv->dev,
3585 "%s: alloc sfty ce MSI %d (error: %d)\n",
3586 __func__, priv->sfty_ce_irq, ret);
3587 irq_err = REQ_IRQ_ERR_SFTY_CE;
3588 goto irq_error;
3589 }
3590 }
3591
3592 /* Request the Safety Feature Uncorrectable Error line in
3593 * case a separate line is used
3594 */
3595 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3596 int_name = priv->int_name_sfty_ue;
3597 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3598 ret = request_irq(priv->sfty_ue_irq,
3599 stmmac_safety_interrupt,
3600 0, int_name, dev);
3601 if (unlikely(ret < 0)) {
3602 netdev_err(priv->dev,
3603 "%s: alloc sfty ue MSI %d (error: %d)\n",
3604 __func__, priv->sfty_ue_irq, ret);
3605 irq_err = REQ_IRQ_ERR_SFTY_UE;
3606 goto irq_error;
3607 }
3608 }
3609
3610 /* Request Rx MSI irq */
3611 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3612 if (i >= MTL_MAX_RX_QUEUES)
3613 break;
3614 if (priv->rx_irq[i] == 0)
3615 continue;
3616
3617 int_name = priv->int_name_rx_irq[i];
3618 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3619 ret = request_irq(priv->rx_irq[i],
3620 stmmac_msi_intr_rx,
3621 0, int_name, &priv->dma_conf.rx_queue[i]);
3622 if (unlikely(ret < 0)) {
3623 netdev_err(priv->dev,
3624 "%s: alloc rx-%d MSI %d (error: %d)\n",
3625 __func__, i, priv->rx_irq[i], ret);
3626 irq_err = REQ_IRQ_ERR_RX;
3627 irq_idx = i;
3628 goto irq_error;
3629 }
3630 cpumask_clear(&cpu_mask);
3631 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3632 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3633 }
3634
3635 /* Request Tx MSI irq */
3636 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3637 if (i >= MTL_MAX_TX_QUEUES)
3638 break;
3639 if (priv->tx_irq[i] == 0)
3640 continue;
3641
3642 int_name = priv->int_name_tx_irq[i];
3643 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3644 ret = request_irq(priv->tx_irq[i],
3645 stmmac_msi_intr_tx,
3646 0, int_name, &priv->dma_conf.tx_queue[i]);
3647 if (unlikely(ret < 0)) {
3648 netdev_err(priv->dev,
3649 "%s: alloc tx-%d MSI %d (error: %d)\n",
3650 __func__, i, priv->tx_irq[i], ret);
3651 irq_err = REQ_IRQ_ERR_TX;
3652 irq_idx = i;
3653 goto irq_error;
3654 }
3655 cpumask_clear(&cpu_mask);
3656 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3657 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3658 }
3659
3660 return 0;
3661
3662 irq_error:
3663 stmmac_free_irq(dev, irq_err, irq_idx);
3664 return ret;
3665 }
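
/* Affinity note: the hints above spread per-queue vectors round-robin over
 * the online CPUs, e.g. with 4 CPUs online rx-1/tx-1 are hinted to CPU 1 and
 * rx-5 wraps back to CPU 1 (5 % 4). These are only hints; userspace such as
 * irqbalance may still move the interrupts elsewhere.
 */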
3666
3667 static int stmmac_request_irq_single(struct net_device *dev)
3668 {
3669 struct stmmac_priv *priv = netdev_priv(dev);
3670 enum request_irq_err irq_err;
3671 int ret;
3672
3673 ret = request_irq(dev->irq, stmmac_interrupt,
3674 IRQF_SHARED, dev->name, dev);
3675 if (unlikely(ret < 0)) {
3676 netdev_err(priv->dev,
3677 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3678 __func__, dev->irq, ret);
3679 irq_err = REQ_IRQ_ERR_MAC;
3680 goto irq_error;
3681 }
3682
3683 /* Request the Wake IRQ in case a separate line
3684 * is used for WoL
3685 */
3686 priv->wol_irq_disabled = true;
3687 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3688 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3689 IRQF_SHARED, dev->name, dev);
3690 if (unlikely(ret < 0)) {
3691 netdev_err(priv->dev,
3692 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3693 __func__, priv->wol_irq, ret);
3694 irq_err = REQ_IRQ_ERR_WOL;
3695 goto irq_error;
3696 }
3697 }
3698
3699 /* Request the LPI IRQ in case a separate line is used for LPI */
3700 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3701 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3702 IRQF_SHARED, dev->name, dev);
3703 if (unlikely(ret < 0)) {
3704 netdev_err(priv->dev,
3705 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3706 __func__, priv->lpi_irq, ret);
3707 irq_err = REQ_IRQ_ERR_LPI;
3708 goto irq_error;
3709 }
3710 }
3711
3712 return 0;
3713
3714 irq_error:
3715 stmmac_free_irq(dev, irq_err, 0);
3716 return ret;
3717 }
3718
3719 static int stmmac_request_irq(struct net_device *dev)
3720 {
3721 struct stmmac_priv *priv = netdev_priv(dev);
3722 int ret;
3723
3724 /* Request the IRQ lines */
3725 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3726 ret = stmmac_request_irq_multi_msi(dev);
3727 else
3728 ret = stmmac_request_irq_single(dev);
3729
3730 return ret;
3731 }
3732
3733 /**
3734 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3735 * @priv: driver private structure
3736 * @mtu: MTU to setup the dma queue and buf with
3737 * Description: Allocate and generate a dma_conf based on the provided MTU.
3738 * Allocate the Tx/Rx DMA queue and init them.
3739 * Return value:
3740 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3741 */
3742 static struct stmmac_dma_conf *
3743 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3744 {
3745 struct stmmac_dma_conf *dma_conf;
3746 int chan, bfsize, ret;
3747
3748 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3749 if (!dma_conf) {
3750 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3751 __func__);
3752 return ERR_PTR(-ENOMEM);
3753 }
3754
3755 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3756 if (bfsize < 0)
3757 bfsize = 0;
3758
3759 if (bfsize < BUF_SIZE_16KiB)
3760 bfsize = stmmac_set_bfsize(mtu, 0);
3761
3762 dma_conf->dma_buf_sz = bfsize;
3763 /* Choose the tx/rx size from the one already defined in the
3764 * priv struct, if any.
3765 */
3766 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3767 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3768
3769 if (!dma_conf->dma_tx_size)
3770 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3771 if (!dma_conf->dma_rx_size)
3772 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3773
3774 /* Earlier check for TBS */
3775 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3776 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3777 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3778
3779 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3780 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3781 }
3782
3783 ret = alloc_dma_desc_resources(priv, dma_conf);
3784 if (ret < 0) {
3785 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3786 __func__);
3787 goto alloc_error;
3788 }
3789
3790 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3791 if (ret < 0) {
3792 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3793 __func__);
3794 goto init_error;
3795 }
3796
3797 return dma_conf;
3798
3799 init_error:
3800 free_dma_desc_resources(priv, dma_conf);
3801 alloc_error:
3802 kfree(dma_conf);
3803 return ERR_PTR(ret);
3804 }
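
/* Sizing sketch (the exact thresholds live in stmmac_set_bfsize() and
 * stmmac_set_16kib_bfsize(), not here): the RX buffer size is bumped to one
 * of the predefined BUF_SIZE_* buckets only when the MTU needs it, while the
 * ring lengths fall back to DMA_DEFAULT_TX_SIZE/DMA_DEFAULT_RX_SIZE if they
 * have not been configured yet, e.g. on the first open before any
 * ethtool -G resize.
 */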
3805
3806 /**
3807 * __stmmac_open - open entry point of the driver
3808 * @dev : pointer to the device structure.
3809 * @dma_conf : structure to take the dma data
3810 * Description:
3811 * This function is the open entry point of the driver.
3812 * Return value:
3813 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3814 * file on failure.
3815 */
3816 static int __stmmac_open(struct net_device *dev,
3817 struct stmmac_dma_conf *dma_conf)
3818 {
3819 struct stmmac_priv *priv = netdev_priv(dev);
3820 int mode = priv->plat->phy_interface;
3821 u32 chan;
3822 int ret;
3823
3824 ret = pm_runtime_resume_and_get(priv->device);
3825 if (ret < 0)
3826 return ret;
3827
3828 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3829 priv->hw->pcs != STMMAC_PCS_RTBI &&
3830 (!priv->hw->xpcs ||
3831 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3832 !priv->hw->lynx_pcs) {
3833 ret = stmmac_init_phy(dev);
3834 if (ret) {
3835 netdev_err(priv->dev,
3836 "%s: Cannot attach to PHY (error: %d)\n",
3837 __func__, ret);
3838 goto init_phy_error;
3839 }
3840 }
3841
3842 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3843
3844 buf_sz = dma_conf->dma_buf_sz;
3845 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3846 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3847 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3848 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3849
3850 stmmac_reset_queues_param(priv);
3851
3852 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3853 priv->plat->serdes_powerup) {
3854 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3855 if (ret < 0) {
3856 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3857 __func__);
3858 goto init_error;
3859 }
3860 }
3861
3862 ret = stmmac_hw_setup(dev, true);
3863 if (ret < 0) {
3864 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3865 goto init_error;
3866 }
3867
3868 stmmac_init_coalesce(priv);
3869
3870 phylink_start(priv->phylink);
3871 /* We may have called phylink_speed_down before */
3872 phylink_speed_up(priv->phylink);
3873
3874 ret = stmmac_request_irq(dev);
3875 if (ret)
3876 goto irq_error;
3877
3878 stmmac_enable_all_queues(priv);
3879 netif_tx_start_all_queues(priv->dev);
3880 stmmac_enable_all_dma_irq(priv);
3881
3882 return 0;
3883
3884 irq_error:
3885 phylink_stop(priv->phylink);
3886
3887 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3888 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3889
3890 stmmac_hw_teardown(dev);
3891 init_error:
3892 phylink_disconnect_phy(priv->phylink);
3893 init_phy_error:
3894 pm_runtime_put(priv->device);
3895 return ret;
3896 }
3897
3898 static int stmmac_open(struct net_device *dev)
3899 {
3900 struct stmmac_priv *priv = netdev_priv(dev);
3901 struct stmmac_dma_conf *dma_conf;
3902 int ret;
3903
3904 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3905 if (IS_ERR(dma_conf))
3906 return PTR_ERR(dma_conf);
3907
3908 ret = __stmmac_open(dev, dma_conf);
3909 if (ret)
3910 free_dma_desc_resources(priv, dma_conf);
3911
3912 kfree(dma_conf);
3913 return ret;
3914 }
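
/* Open path in short: build a candidate dma_conf sized for the current MTU,
 * hand it to __stmmac_open() which copies it into priv->dma_conf, and free
 * the temporary structure unconditionally afterwards. On failure the
 * descriptor resources owned by the temporary configuration are released
 * before the kfree().
 */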
3915
3916 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3917 {
3918 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3919
3920 if (priv->fpe_wq) {
3921 destroy_workqueue(priv->fpe_wq);
3922 priv->fpe_wq = NULL;
3923 }
3924
3925 netdev_info(priv->dev, "FPE workqueue stop");
3926 }
3927
3928 /**
3929 * stmmac_release - close entry point of the driver
3930 * @dev : device pointer.
3931 * Description:
3932 * This is the stop entry point of the driver.
3933 */
3934 static int stmmac_release(struct net_device *dev)
3935 {
3936 struct stmmac_priv *priv = netdev_priv(dev);
3937 u32 chan;
3938
3939 if (device_may_wakeup(priv->device))
3940 phylink_speed_down(priv->phylink, false);
3941 /* Stop and disconnect the PHY */
3942 phylink_stop(priv->phylink);
3943 phylink_disconnect_phy(priv->phylink);
3944
3945 stmmac_disable_all_queues(priv);
3946
3947 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3948 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3949
3950 netif_tx_disable(dev);
3951
3952 /* Free the IRQ lines */
3953 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3954
3955 if (priv->eee_enabled) {
3956 priv->tx_path_in_lpi_mode = false;
3957 del_timer_sync(&priv->eee_ctrl_timer);
3958 }
3959
3960 /* Stop TX/RX DMA and clear the descriptors */
3961 stmmac_stop_all_dma(priv);
3962
3963 /* Release and free the Rx/Tx resources */
3964 free_dma_desc_resources(priv, &priv->dma_conf);
3965
3966 /* Disable the MAC Rx/Tx */
3967 stmmac_mac_set(priv, priv->ioaddr, false);
3968
3969 /* Power down the SerDes, if present */
3970 if (priv->plat->serdes_powerdown)
3971 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3972
3973 netif_carrier_off(dev);
3974
3975 stmmac_release_ptp(priv);
3976
3977 pm_runtime_put(priv->device);
3978
3979 if (priv->dma_cap.fpesel)
3980 stmmac_fpe_stop_wq(priv);
3981
3982 return 0;
3983 }
3984
3985 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3986 struct stmmac_tx_queue *tx_q)
3987 {
3988 u16 tag = 0x0, inner_tag = 0x0;
3989 u32 inner_type = 0x0;
3990 struct dma_desc *p;
3991
3992 if (!priv->dma_cap.vlins)
3993 return false;
3994 if (!skb_vlan_tag_present(skb))
3995 return false;
3996 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3997 inner_tag = skb_vlan_tag_get(skb);
3998 inner_type = STMMAC_VLAN_INSERT;
3999 }
4000
4001 tag = skb_vlan_tag_get(skb);
4002
4003 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4004 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4005 else
4006 p = &tx_q->dma_tx[tx_q->cur_tx];
4007
4008 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4009 return false;
4010
4011 stmmac_set_tx_owner(priv, p);
4012 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4013 return true;
4014 }
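
/* Usage note: the return value tells the TX paths whether the hardware will
 * insert the VLAN tag. When it returns true, one descriptor has already been
 * consumed and cur_tx advanced, and the caller only has to flag the first
 * data descriptor with stmmac_set_desc_vlan(); when it returns false, no
 * descriptor was consumed and the frame is sent without hardware tag
 * insertion.
 */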
4015
4016 /**
4017 * stmmac_tso_allocator - allocate and fill TSO payload descriptors
4018 * @priv: driver private structure
4019 * @des: buffer start address
4020 * @total_len: total length to fill in descriptors
4021 * @last_segment: condition for the last descriptor
4022 * @queue: TX queue index
4023 * Description:
4024 * This function fills descriptors and requests new descriptors according to
4025 * the buffer length to fill.
4026 */
4027 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4028 int total_len, bool last_segment, u32 queue)
4029 {
4030 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4031 struct dma_desc *desc;
4032 u32 buff_size;
4033 int tmp_len;
4034
4035 tmp_len = total_len;
4036
4037 while (tmp_len > 0) {
4038 dma_addr_t curr_addr;
4039
4040 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4041 priv->dma_conf.dma_tx_size);
4042 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4043
4044 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4045 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4046 else
4047 desc = &tx_q->dma_tx[tx_q->cur_tx];
4048
4049 curr_addr = des + (total_len - tmp_len);
4050 if (priv->dma_cap.addr64 <= 32)
4051 desc->des0 = cpu_to_le32(curr_addr);
4052 else
4053 stmmac_set_desc_addr(priv, desc, curr_addr);
4054
4055 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4056 TSO_MAX_BUFF_SIZE : tmp_len;
4057
4058 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4059 0, 1,
4060 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4061 0, 0);
4062
4063 tmp_len -= TSO_MAX_BUFF_SIZE;
4064 }
4065 }
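
/* Worked example: with TSO_MAX_BUFF_SIZE = SZ_16K - 1 (16383), a 40000-byte
 * payload is spread over three descriptors of 16383, 16383 and 7234 bytes.
 * Only the final iteration can mark the last segment, because the
 * (tmp_len <= TSO_MAX_BUFF_SIZE) condition is false until then.
 */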
4066
4067 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4068 {
4069 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4070 int desc_size;
4071
4072 if (likely(priv->extend_desc))
4073 desc_size = sizeof(struct dma_extended_desc);
4074 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4075 desc_size = sizeof(struct dma_edesc);
4076 else
4077 desc_size = sizeof(struct dma_desc);
4078
4079 /* The own bit must be the last setting done when preparing the
4080 * descriptor, and then a barrier is needed to make sure that
4081 * all is coherent before granting ownership to the DMA engine.
4082 */
4083 wmb();
4084
4085 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4086 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4087 }
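
/* The tail-pointer write is the doorbell: it points one descriptor past the
 * last one handed to hardware, so with 16-byte basic descriptors and
 * cur_tx == 5 the value programmed is dma_tx_phy + 80. The wmb() ensures the
 * OWN bits and the rest of the descriptor contents are visible to the device
 * before that write.
 */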
4088
4089 /**
4090 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4091 * @skb : the socket buffer
4092 * @dev : device pointer
4093 * Description: this is the transmit function that is called on TSO frames
4094 * (support available on GMAC4 and newer chips).
4095 * Diagram below show the ring programming in case of TSO frames:
4096 *
4097 * First Descriptor
4098 * --------
4099 * | DES0 |---> buffer1 = L2/L3/L4 header
4100 * | DES1 |---> TCP Payload (can continue on next descr...)
4101 * | DES2 |---> buffer 1 and 2 len
4102 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4103 * --------
4104 * |
4105 * ...
4106 * |
4107 * --------
4108 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4109 * | DES1 | --|
4110 * | DES2 | --> buffer 1 and 2 len
4111 * | DES3 |
4112 * --------
4113 *
4114 * The MSS is fixed while TSO is in use, so the TDES3 context field only needs reprogramming when it changes.
4115 */
4116 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4117 {
4118 struct dma_desc *desc, *first, *mss_desc = NULL;
4119 struct stmmac_priv *priv = netdev_priv(dev);
4120 int nfrags = skb_shinfo(skb)->nr_frags;
4121 u32 queue = skb_get_queue_mapping(skb);
4122 unsigned int first_entry, tx_packets;
4123 struct stmmac_txq_stats *txq_stats;
4124 int tmp_pay_len = 0, first_tx;
4125 struct stmmac_tx_queue *tx_q;
4126 bool has_vlan, set_ic;
4127 dma_addr_t tso_des, des;
4128 u8 proto_hdr_len, hdr;
4129 u32 pay_len, mss;
4130 int i;
4131
4132 tx_q = &priv->dma_conf.tx_queue[queue];
4133 txq_stats = &priv->xstats.txq_stats[queue];
4134 first_tx = tx_q->cur_tx;
4135
4136 /* Compute header lengths */
4137 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4138 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4139 hdr = sizeof(struct udphdr);
4140 } else {
4141 proto_hdr_len = skb_tcp_all_headers(skb);
4142 hdr = tcp_hdrlen(skb);
4143 }
4144
4145 /* Descriptor availability based on the threshold should be safe enough */
4146 if (unlikely(stmmac_tx_avail(priv, queue) <
4147 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4148 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4149 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4150 queue));
4151 /* This is a hard error, log it. */
4152 netdev_err(priv->dev,
4153 "%s: Tx Ring full when queue awake\n",
4154 __func__);
4155 }
4156 return NETDEV_TX_BUSY;
4157 }
4158
4159 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4160
4161 mss = skb_shinfo(skb)->gso_size;
4162
4163 /* set new MSS value if needed */
4164 if (mss != tx_q->mss) {
4165 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4166 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4167 else
4168 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4169
4170 stmmac_set_mss(priv, mss_desc, mss);
4171 tx_q->mss = mss;
4172 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4173 priv->dma_conf.dma_tx_size);
4174 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4175 }
4176
4177 if (netif_msg_tx_queued(priv)) {
4178 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4179 __func__, hdr, proto_hdr_len, pay_len, mss);
4180 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4181 skb->data_len);
4182 }
4183
4184 /* Check if VLAN can be inserted by HW */
4185 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4186
4187 first_entry = tx_q->cur_tx;
4188 WARN_ON(tx_q->tx_skbuff[first_entry]);
4189
4190 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4191 desc = &tx_q->dma_entx[first_entry].basic;
4192 else
4193 desc = &tx_q->dma_tx[first_entry];
4194 first = desc;
4195
4196 if (has_vlan)
4197 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4198
4199 /* first descriptor: fill Headers on Buf1 */
4200 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4201 DMA_TO_DEVICE);
4202 if (dma_mapping_error(priv->device, des))
4203 goto dma_map_err;
4204
4205 if (priv->dma_cap.addr64 <= 32) {
4206 first->des0 = cpu_to_le32(des);
4207
4208 /* Fill start of payload in buff2 of first descriptor */
4209 if (pay_len)
4210 first->des1 = cpu_to_le32(des + proto_hdr_len);
4211
4212 /* If needed take extra descriptors to fill the remaining payload */
4213 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4214 tso_des = des;
4215 } else {
4216 stmmac_set_desc_addr(priv, first, des);
4217 tmp_pay_len = pay_len;
4218 tso_des = des + proto_hdr_len;
4219 pay_len = 0;
4220 }
4221
4222 stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4223
4224 /* In case two or more DMA transmit descriptors are allocated for this
4225 * non-paged SKB data, the DMA buffer address should be saved to
4226 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4227 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4228 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4229 * since the tail areas of the DMA buffer can be accessed by DMA engine
4230 * sooner or later.
4231 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4232 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4233 * this DMA buffer right after the DMA engine completely finishes the
4234 * full buffer transmission.
4235 */
4236 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4237 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4238 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4239 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4240
4241 /* Prepare fragments */
4242 for (i = 0; i < nfrags; i++) {
4243 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4244
4245 des = skb_frag_dma_map(priv->device, frag, 0,
4246 skb_frag_size(frag),
4247 DMA_TO_DEVICE);
4248 if (dma_mapping_error(priv->device, des))
4249 goto dma_map_err;
4250
4251 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4252 (i == nfrags - 1), queue);
4253
4254 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4255 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4256 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4257 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4258 }
4259
4260 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4261
4262 /* Only the last descriptor gets to point to the skb. */
4263 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4264 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4265
4266 /* Manage tx mitigation */
4267 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4268 tx_q->tx_count_frames += tx_packets;
4269
4270 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4271 set_ic = true;
4272 else if (!priv->tx_coal_frames[queue])
4273 set_ic = false;
4274 else if (tx_packets > priv->tx_coal_frames[queue])
4275 set_ic = true;
4276 else if ((tx_q->tx_count_frames %
4277 priv->tx_coal_frames[queue]) < tx_packets)
4278 set_ic = true;
4279 else
4280 set_ic = false;
4281
4282 if (set_ic) {
4283 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4284 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4285 else
4286 desc = &tx_q->dma_tx[tx_q->cur_tx];
4287
4288 tx_q->tx_count_frames = 0;
4289 stmmac_set_tx_ic(priv, desc);
4290 }
4291
4292 /* We've used all descriptors we need for this skb, however,
4293 * advance cur_tx so that it references a fresh descriptor.
4294 * ndo_start_xmit will fill this descriptor the next time it's
4295 * called and stmmac_tx_clean may clean up to this descriptor.
4296 */
4297 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4298
4299 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4300 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4301 __func__);
4302 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4303 }
4304
4305 u64_stats_update_begin(&txq_stats->q_syncp);
4306 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4307 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4308 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4309 if (set_ic)
4310 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4311 u64_stats_update_end(&txq_stats->q_syncp);
4312
4313 if (priv->sarc_type)
4314 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4315
4316 skb_tx_timestamp(skb);
4317
4318 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4319 priv->hwts_tx_en)) {
4320 /* declare that device is doing timestamping */
4321 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4322 stmmac_enable_tx_timestamp(priv, first);
4323 }
4324
4325 /* Complete the first descriptor before granting the DMA */
4326 stmmac_prepare_tso_tx_desc(priv, first, 1,
4327 proto_hdr_len,
4328 pay_len,
4329 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4330 hdr / 4, (skb->len - proto_hdr_len));
4331
4332 /* If context desc is used to change MSS */
4333 if (mss_desc) {
4334 /* Make sure that first descriptor has been completely
4335 * written, including its own bit. This is because MSS is
4336 * actually before first descriptor, so we need to make
4337 * sure that MSS's own bit is the last thing written.
4338 */
4339 dma_wmb();
4340 stmmac_set_tx_owner(priv, mss_desc);
4341 }
4342
4343 if (netif_msg_pktdata(priv)) {
4344 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4345 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4346 tx_q->cur_tx, first, nfrags);
4347 pr_info(">>> frame to be transmitted: ");
4348 print_pkt(skb->data, skb_headlen(skb));
4349 }
4350
4351 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4352
4353 stmmac_flush_tx_descriptors(priv, queue);
4354 stmmac_tx_timer_arm(priv, queue);
4355
4356 return NETDEV_TX_OK;
4357
4358 dma_map_err:
4359 dev_err(priv->device, "Tx dma map failed\n");
4360 dev_kfree_skb(skb);
4361 priv->xstats.tx_dropped++;
4362 return NETDEV_TX_OK;
4363 }
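
/* Coalescing note, shared with stmmac_xmit() below: the IC bit is always
 * requested for frames that want a hardware TX timestamp, never when
 * tx_coal_frames is 0, and otherwise roughly once every tx_coal_frames
 * packets. With tx_coal_frames = 25 and one packet per skb, the bit lands on
 * every 25th descriptor and the remaining completions are reaped by the
 * hrtimer armed through stmmac_tx_timer_arm().
 */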
4364
4365 /**
4366 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4367 * @skb: socket buffer to check
4368 *
4369 * Check if a packet has an ethertype that will trigger the IP header checks
4370 * and IP/TCP checksum engine of the stmmac core.
4371 *
4372 * Return: true if the ethertype can trigger the checksum engine, false
4373 * otherwise
4374 */
4375 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4376 {
4377 int depth = 0;
4378 __be16 proto;
4379
4380 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4381 &depth);
4382
4383 return (depth <= ETH_HLEN) &&
4384 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4385 }
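
/* Example: plain IPv4/IPv6 frames return true and may use the checksum
 * engine, while ARP frames or DSA-tagged frames whose real EtherType sits
 * beyond the outer Ethernet header return false, so the TX path falls back
 * to skb_checksum_help() and the RX path reports CHECKSUM_NONE.
 */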
4386
4387 /**
4388 * stmmac_xmit - Tx entry point of the driver
4389 * @skb : the socket buffer
4390 * @dev : device pointer
4391 * Description : this is the tx entry point of the driver.
4392 * It programs the chain or the ring and supports oversized frames
4393 * and SG feature.
4394 */
4395 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4396 {
4397 unsigned int first_entry, tx_packets, enh_desc;
4398 struct stmmac_priv *priv = netdev_priv(dev);
4399 unsigned int nopaged_len = skb_headlen(skb);
4400 int i, csum_insertion = 0, is_jumbo = 0;
4401 u32 queue = skb_get_queue_mapping(skb);
4402 int nfrags = skb_shinfo(skb)->nr_frags;
4403 int gso = skb_shinfo(skb)->gso_type;
4404 struct stmmac_txq_stats *txq_stats;
4405 struct dma_edesc *tbs_desc = NULL;
4406 struct dma_desc *desc, *first;
4407 struct stmmac_tx_queue *tx_q;
4408 bool has_vlan, set_ic;
4409 int entry, first_tx;
4410 dma_addr_t des;
4411
4412 tx_q = &priv->dma_conf.tx_queue[queue];
4413 txq_stats = &priv->xstats.txq_stats[queue];
4414 first_tx = tx_q->cur_tx;
4415
4416 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4417 stmmac_disable_eee_mode(priv);
4418
4419 /* Manage oversized TCP frames for GMAC4 device */
4420 if (skb_is_gso(skb) && priv->tso) {
4421 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4422 return stmmac_tso_xmit(skb, dev);
4423 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4424 return stmmac_tso_xmit(skb, dev);
4425 }
4426
4427 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4428 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4429 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4430 queue));
4431 /* This is a hard error, log it. */
4432 netdev_err(priv->dev,
4433 "%s: Tx Ring full when queue awake\n",
4434 __func__);
4435 }
4436 return NETDEV_TX_BUSY;
4437 }
4438
4439 /* Check if VLAN can be inserted by HW */
4440 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4441
4442 entry = tx_q->cur_tx;
4443 first_entry = entry;
4444 WARN_ON(tx_q->tx_skbuff[first_entry]);
4445
4446 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4447 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4448 * queues. In that case, checksum offloading for those queues that don't
4449 * support tx coe needs to fall back to software checksum calculation.
4450 *
4451 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4452 * also have to be checksummed in software.
4453 */
4454 if (csum_insertion &&
4455 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4456 !stmmac_has_ip_ethertype(skb))) {
4457 if (unlikely(skb_checksum_help(skb)))
4458 goto dma_map_err;
4459 csum_insertion = !csum_insertion;
4460 }
4461
4462 if (likely(priv->extend_desc))
4463 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4464 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4465 desc = &tx_q->dma_entx[entry].basic;
4466 else
4467 desc = tx_q->dma_tx + entry;
4468
4469 first = desc;
4470
4471 if (has_vlan)
4472 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4473
4474 enh_desc = priv->plat->enh_desc;
4475 /* To program the descriptors according to the size of the frame */
4476 if (enh_desc)
4477 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4478
4479 if (unlikely(is_jumbo)) {
4480 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4481 if (unlikely(entry < 0) && (entry != -EINVAL))
4482 goto dma_map_err;
4483 }
4484
4485 for (i = 0; i < nfrags; i++) {
4486 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4487 int len = skb_frag_size(frag);
4488 bool last_segment = (i == (nfrags - 1));
4489
4490 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4491 WARN_ON(tx_q->tx_skbuff[entry]);
4492
4493 if (likely(priv->extend_desc))
4494 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4495 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4496 desc = &tx_q->dma_entx[entry].basic;
4497 else
4498 desc = tx_q->dma_tx + entry;
4499
4500 des = skb_frag_dma_map(priv->device, frag, 0, len,
4501 DMA_TO_DEVICE);
4502 if (dma_mapping_error(priv->device, des))
4503 goto dma_map_err; /* should reuse desc w/o issues */
4504
4505 tx_q->tx_skbuff_dma[entry].buf = des;
4506
4507 stmmac_set_desc_addr(priv, desc, des);
4508
4509 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4510 tx_q->tx_skbuff_dma[entry].len = len;
4511 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4512 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4513
4514 /* Prepare the descriptor and set the own bit too */
4515 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4516 priv->mode, 1, last_segment, skb->len);
4517 }
4518
4519 /* Only the last descriptor gets to point to the skb. */
4520 tx_q->tx_skbuff[entry] = skb;
4521 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4522
4523 /* According to the coalesce parameter the IC bit for the latest
4524 * segment is reset and the timer re-started to clean the tx status.
4525 * This approach takes care of the fragments: desc is the first
4526 * element in case of no SG.
4527 */
4528 tx_packets = (entry + 1) - first_tx;
4529 tx_q->tx_count_frames += tx_packets;
4530
4531 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4532 set_ic = true;
4533 else if (!priv->tx_coal_frames[queue])
4534 set_ic = false;
4535 else if (tx_packets > priv->tx_coal_frames[queue])
4536 set_ic = true;
4537 else if ((tx_q->tx_count_frames %
4538 priv->tx_coal_frames[queue]) < tx_packets)
4539 set_ic = true;
4540 else
4541 set_ic = false;
4542
4543 if (set_ic) {
4544 if (likely(priv->extend_desc))
4545 desc = &tx_q->dma_etx[entry].basic;
4546 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4547 desc = &tx_q->dma_entx[entry].basic;
4548 else
4549 desc = &tx_q->dma_tx[entry];
4550
4551 tx_q->tx_count_frames = 0;
4552 stmmac_set_tx_ic(priv, desc);
4553 }
4554
4555 /* We've used all descriptors we need for this skb, however,
4556 * advance cur_tx so that it references a fresh descriptor.
4557 * ndo_start_xmit will fill this descriptor the next time it's
4558 * called and stmmac_tx_clean may clean up to this descriptor.
4559 */
4560 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4561 tx_q->cur_tx = entry;
4562
4563 if (netif_msg_pktdata(priv)) {
4564 netdev_dbg(priv->dev,
4565 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4566 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4567 entry, first, nfrags);
4568
4569 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4570 print_pkt(skb->data, skb->len);
4571 }
4572
4573 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4574 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4575 __func__);
4576 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4577 }
4578
4579 u64_stats_update_begin(&txq_stats->q_syncp);
4580 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4581 if (set_ic)
4582 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4583 u64_stats_update_end(&txq_stats->q_syncp);
4584
4585 if (priv->sarc_type)
4586 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4587
4588 skb_tx_timestamp(skb);
4589
4590 /* Ready to fill the first descriptor and set the OWN bit w/o any
4591 * problems because all the descriptors are actually ready to be
4592 * passed to the DMA engine.
4593 */
4594 if (likely(!is_jumbo)) {
4595 bool last_segment = (nfrags == 0);
4596
4597 des = dma_map_single(priv->device, skb->data,
4598 nopaged_len, DMA_TO_DEVICE);
4599 if (dma_mapping_error(priv->device, des))
4600 goto dma_map_err;
4601
4602 tx_q->tx_skbuff_dma[first_entry].buf = des;
4603 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4604 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4605
4606 stmmac_set_desc_addr(priv, first, des);
4607
4608 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4609 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4610
4611 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4612 priv->hwts_tx_en)) {
4613 /* declare that device is doing timestamping */
4614 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4615 stmmac_enable_tx_timestamp(priv, first);
4616 }
4617
4618 /* Prepare the first descriptor setting the OWN bit too */
4619 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4620 csum_insertion, priv->mode, 0, last_segment,
4621 skb->len);
4622 }
4623
4624 if (tx_q->tbs & STMMAC_TBS_EN) {
4625 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4626
4627 tbs_desc = &tx_q->dma_entx[first_entry];
4628 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4629 }
4630
4631 stmmac_set_tx_owner(priv, first);
4632
4633 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4634
4635 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4636
4637 stmmac_flush_tx_descriptors(priv, queue);
4638 stmmac_tx_timer_arm(priv, queue);
4639
4640 return NETDEV_TX_OK;
4641
4642 dma_map_err:
4643 netdev_err(priv->dev, "Tx DMA map failed\n");
4644 dev_kfree_skb(skb);
4645 priv->xstats.tx_dropped++;
4646 return NETDEV_TX_OK;
4647 }
4648
4649 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4650 {
4651 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4652 __be16 vlan_proto = veth->h_vlan_proto;
4653 u16 vlanid;
4654
4655 if ((vlan_proto == htons(ETH_P_8021Q) &&
4656 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4657 (vlan_proto == htons(ETH_P_8021AD) &&
4658 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4659 /* pop the vlan tag */
4660 vlanid = ntohs(veth->h_vlan_TCI);
4661 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4662 skb_pull(skb, VLAN_HLEN);
4663 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4664 }
4665 }
4666
4667 /**
4668 * stmmac_rx_refill - refill used skb preallocated buffers
4669 * @priv: driver private structure
4670 * @queue: RX queue index
4671 * Description : this is to reallocate the skb for the reception process
4672 * that is based on zero-copy.
4673 */
4674 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4675 {
4676 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4677 int dirty = stmmac_rx_dirty(priv, queue);
4678 unsigned int entry = rx_q->dirty_rx;
4679 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4680
4681 if (priv->dma_cap.host_dma_width <= 32)
4682 gfp |= GFP_DMA32;
4683
4684 while (dirty-- > 0) {
4685 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4686 struct dma_desc *p;
4687 bool use_rx_wd;
4688
4689 if (priv->extend_desc)
4690 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4691 else
4692 p = rx_q->dma_rx + entry;
4693
4694 if (!buf->page) {
4695 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4696 if (!buf->page)
4697 break;
4698 }
4699
4700 if (priv->sph && !buf->sec_page) {
4701 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4702 if (!buf->sec_page)
4703 break;
4704
4705 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4706 }
4707
4708 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4709
4710 stmmac_set_desc_addr(priv, p, buf->addr);
4711 if (priv->sph)
4712 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4713 else
4714 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4715 stmmac_refill_desc3(priv, rx_q, p);
4716
4717 rx_q->rx_count_frames++;
4718 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4719 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4720 rx_q->rx_count_frames = 0;
4721
4722 use_rx_wd = !priv->rx_coal_frames[queue];
4723 use_rx_wd |= rx_q->rx_count_frames > 0;
4724 if (!priv->use_riwt)
4725 use_rx_wd = false;
4726
4727 dma_wmb();
4728 stmmac_set_rx_owner(priv, p, use_rx_wd);
4729
4730 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4731 }
4732 rx_q->dirty_rx = entry;
4733 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4734 (rx_q->dirty_rx * sizeof(struct dma_desc));
4735 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4736 }
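
/* Refill note: rx_count_frames together with rx_coal_frames[queue] and
 * use_riwt decides, per refilled descriptor, whether the DMA should raise an
 * interrupt on completion or rely on the RIWT watchdog programmed through
 * stmmac_rx_watchdog() in stmmac_hw_setup(), so a burst of refills does not
 * turn into one interrupt per buffer.
 */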
4737
4738 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4739 struct dma_desc *p,
4740 int status, unsigned int len)
4741 {
4742 unsigned int plen = 0, hlen = 0;
4743 int coe = priv->hw->rx_csum;
4744
4745 /* Not first descriptor, buffer is always zero */
4746 if (priv->sph && len)
4747 return 0;
4748
4749 /* First descriptor, get split header length */
4750 stmmac_get_rx_header_len(priv, p, &hlen);
4751 if (priv->sph && hlen) {
4752 priv->xstats.rx_split_hdr_pkt_n++;
4753 return hlen;
4754 }
4755
4756 /* First descriptor, not last descriptor and not split header */
4757 if (status & rx_not_ls)
4758 return priv->dma_conf.dma_buf_sz;
4759
4760 plen = stmmac_get_rx_frame_len(priv, p, coe);
4761
4762 /* First descriptor and last descriptor and not split header */
4763 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4764 }
4765
4766 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4767 struct dma_desc *p,
4768 int status, unsigned int len)
4769 {
4770 int coe = priv->hw->rx_csum;
4771 unsigned int plen = 0;
4772
4773 /* Not split header, buffer is not available */
4774 if (!priv->sph)
4775 return 0;
4776
4777 /* Not last descriptor */
4778 if (status & rx_not_ls)
4779 return priv->dma_conf.dma_buf_sz;
4780
4781 plen = stmmac_get_rx_frame_len(priv, p, coe);
4782
4783 /* Last descriptor */
4784 return plen - len;
4785 }
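
/* Taken together, buf1_len/buf2_len rebuild a frame the DMA split across
 * descriptors: with SPH enabled the first descriptor contributes the header
 * length in buffer 1 and up to dma_buf_sz of payload in buffer 2, middle
 * descriptors contribute dma_buf_sz each, and the last one contributes
 * whatever remains of the frame length reported in the descriptor.
 */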
4786
4787 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4788 struct xdp_frame *xdpf, bool dma_map)
4789 {
4790 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4791 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4792 unsigned int entry = tx_q->cur_tx;
4793 struct dma_desc *tx_desc;
4794 dma_addr_t dma_addr;
4795 bool set_ic;
4796
4797 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4798 return STMMAC_XDP_CONSUMED;
4799
4800 if (likely(priv->extend_desc))
4801 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4802 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4803 tx_desc = &tx_q->dma_entx[entry].basic;
4804 else
4805 tx_desc = tx_q->dma_tx + entry;
4806
4807 if (dma_map) {
4808 dma_addr = dma_map_single(priv->device, xdpf->data,
4809 xdpf->len, DMA_TO_DEVICE);
4810 if (dma_mapping_error(priv->device, dma_addr))
4811 return STMMAC_XDP_CONSUMED;
4812
4813 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4814 } else {
4815 struct page *page = virt_to_page(xdpf->data);
4816
4817 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4818 xdpf->headroom;
4819 dma_sync_single_for_device(priv->device, dma_addr,
4820 xdpf->len, DMA_BIDIRECTIONAL);
4821
4822 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4823 }
4824
4825 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4826 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4827 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4828 tx_q->tx_skbuff_dma[entry].last_segment = true;
4829 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4830
4831 tx_q->xdpf[entry] = xdpf;
4832
4833 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4834
4835 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4836 true, priv->mode, true, true,
4837 xdpf->len);
4838
4839 tx_q->tx_count_frames++;
4840
4841 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4842 set_ic = true;
4843 else
4844 set_ic = false;
4845
4846 if (set_ic) {
4847 tx_q->tx_count_frames = 0;
4848 stmmac_set_tx_ic(priv, tx_desc);
4849 u64_stats_update_begin(&txq_stats->q_syncp);
4850 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4851 u64_stats_update_end(&txq_stats->q_syncp);
4852 }
4853
4854 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4855
4856 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4857 tx_q->cur_tx = entry;
4858
4859 return STMMAC_XDP_TX;
4860 }
4861
4862 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4863 int cpu)
4864 {
4865 int index = cpu;
4866
4867 if (unlikely(index < 0))
4868 index = 0;
4869
4870 while (index >= priv->plat->tx_queues_to_use)
4871 index -= priv->plat->tx_queues_to_use;
4872
4873 return index;
4874 }
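
/* Example: with tx_queues_to_use = 4, XDP transmissions issued from CPU 6 are
 * steered to TX queue 2 (6 - 4). This spreads XDP_TX traffic across the
 * available queues while still sharing them with the regular stack under the
 * per-queue __netif_tx_lock() taken in stmmac_xdp_xmit_back().
 */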
4875
4876 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4877 struct xdp_buff *xdp)
4878 {
4879 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4880 int cpu = smp_processor_id();
4881 struct netdev_queue *nq;
4882 int queue;
4883 int res;
4884
4885 if (unlikely(!xdpf))
4886 return STMMAC_XDP_CONSUMED;
4887
4888 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4889 nq = netdev_get_tx_queue(priv->dev, queue);
4890
4891 __netif_tx_lock(nq, cpu);
4892 /* Avoids TX time-out as we are sharing with slow path */
4893 txq_trans_cond_update(nq);
4894
4895 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4896 if (res == STMMAC_XDP_TX)
4897 stmmac_flush_tx_descriptors(priv, queue);
4898
4899 __netif_tx_unlock(nq);
4900
4901 return res;
4902 }
4903
4904 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4905 struct bpf_prog *prog,
4906 struct xdp_buff *xdp)
4907 {
4908 u32 act;
4909 int res;
4910
4911 act = bpf_prog_run_xdp(prog, xdp);
4912 switch (act) {
4913 case XDP_PASS:
4914 res = STMMAC_XDP_PASS;
4915 break;
4916 case XDP_TX:
4917 res = stmmac_xdp_xmit_back(priv, xdp);
4918 break;
4919 case XDP_REDIRECT:
4920 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4921 res = STMMAC_XDP_CONSUMED;
4922 else
4923 res = STMMAC_XDP_REDIRECT;
4924 break;
4925 default:
4926 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4927 fallthrough;
4928 case XDP_ABORTED:
4929 trace_xdp_exception(priv->dev, prog, act);
4930 fallthrough;
4931 case XDP_DROP:
4932 res = STMMAC_XDP_CONSUMED;
4933 break;
4934 }
4935
4936 return res;
4937 }
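
/* Verdict mapping: XDP_PASS lets the normal skb path deliver the frame,
 * XDP_TX bounces it out through one of our own TX queues, XDP_REDIRECT hands
 * it to another device or an AF_XDP socket, and every other action
 * (including XDP_ABORTED and XDP_DROP) is reported as consumed so the buffer
 * gets recycled by the caller.
 */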
4938
4939 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4940 struct xdp_buff *xdp)
4941 {
4942 struct bpf_prog *prog;
4943 int res;
4944
4945 prog = READ_ONCE(priv->xdp_prog);
4946 if (!prog) {
4947 res = STMMAC_XDP_PASS;
4948 goto out;
4949 }
4950
4951 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4952 out:
4953 return ERR_PTR(-res);
4954 }
4955
4956 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4957 int xdp_status)
4958 {
4959 int cpu = smp_processor_id();
4960 int queue;
4961
4962 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4963
4964 if (xdp_status & STMMAC_XDP_TX)
4965 stmmac_tx_timer_arm(priv, queue);
4966
4967 if (xdp_status & STMMAC_XDP_REDIRECT)
4968 xdp_do_flush();
4969 }
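
/* Batching note: xdp_do_flush() is called once per NAPI pass rather than per
 * packet, and the TX cleanup timer is armed only if at least one XDP_TX
 * verdict was produced, mirroring what the slow path does after queuing
 * frames in stmmac_xmit().
 */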
4970
4971 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4972 struct xdp_buff *xdp)
4973 {
4974 unsigned int metasize = xdp->data - xdp->data_meta;
4975 unsigned int datasize = xdp->data_end - xdp->data;
4976 struct sk_buff *skb;
4977
4978 skb = __napi_alloc_skb(&ch->rxtx_napi,
4979 xdp->data_end - xdp->data_hard_start,
4980 GFP_ATOMIC | __GFP_NOWARN);
4981 if (unlikely(!skb))
4982 return NULL;
4983
4984 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4985 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4986 if (metasize)
4987 skb_metadata_set(skb, metasize);
4988
4989 return skb;
4990 }
4991
4992 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4993 struct dma_desc *p, struct dma_desc *np,
4994 struct xdp_buff *xdp)
4995 {
4996 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4997 struct stmmac_channel *ch = &priv->channel[queue];
4998 unsigned int len = xdp->data_end - xdp->data;
4999 enum pkt_hash_types hash_type;
5000 int coe = priv->hw->rx_csum;
5001 struct sk_buff *skb;
5002 u32 hash;
5003
5004 skb = stmmac_construct_skb_zc(ch, xdp);
5005 if (!skb) {
5006 priv->xstats.rx_dropped++;
5007 return;
5008 }
5009
5010 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5011 stmmac_rx_vlan(priv->dev, skb);
5012 skb->protocol = eth_type_trans(skb, priv->dev);
5013
5014 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5015 skb_checksum_none_assert(skb);
5016 else
5017 skb->ip_summed = CHECKSUM_UNNECESSARY;
5018
5019 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5020 skb_set_hash(skb, hash, hash_type);
5021
5022 skb_record_rx_queue(skb, queue);
5023 napi_gro_receive(&ch->rxtx_napi, skb);
5024
5025 u64_stats_update_begin(&rxq_stats->napi_syncp);
5026 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5027 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5028 u64_stats_update_end(&rxq_stats->napi_syncp);
5029 }
5030
5031 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5032 {
5033 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5034 unsigned int entry = rx_q->dirty_rx;
5035 struct dma_desc *rx_desc = NULL;
5036 bool ret = true;
5037
5038 budget = min(budget, stmmac_rx_dirty(priv, queue));
5039
5040 while (budget-- > 0 && entry != rx_q->cur_rx) {
5041 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5042 dma_addr_t dma_addr;
5043 bool use_rx_wd;
5044
5045 if (!buf->xdp) {
5046 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5047 if (!buf->xdp) {
5048 ret = false;
5049 break;
5050 }
5051 }
5052
5053 if (priv->extend_desc)
5054 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5055 else
5056 rx_desc = rx_q->dma_rx + entry;
5057
5058 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5059 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5060 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5061 stmmac_refill_desc3(priv, rx_q, rx_desc);
5062
5063 rx_q->rx_count_frames++;
5064 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5065 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5066 rx_q->rx_count_frames = 0;
5067
5068 use_rx_wd = !priv->rx_coal_frames[queue];
5069 use_rx_wd |= rx_q->rx_count_frames > 0;
5070 if (!priv->use_riwt)
5071 use_rx_wd = false;
5072
5073 dma_wmb();
5074 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5075
5076 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5077 }
5078
5079 if (rx_desc) {
5080 rx_q->dirty_rx = entry;
5081 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5082 (rx_q->dirty_rx * sizeof(struct dma_desc));
5083 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5084 }
5085
5086 return ret;
5087 }
5088
5089 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5090 {
5091 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5092 * to represent incoming packet, whereas cb field in the same structure
5093 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5094 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5095 */
5096 return (struct stmmac_xdp_buff *)xdp;
5097 }
5098
5099 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5100 {
5101 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5102 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5103 unsigned int count = 0, error = 0, len = 0;
5104 int dirty = stmmac_rx_dirty(priv, queue);
5105 unsigned int next_entry = rx_q->cur_rx;
5106 u32 rx_errors = 0, rx_dropped = 0;
5107 unsigned int desc_size;
5108 struct bpf_prog *prog;
5109 bool failure = false;
5110 int xdp_status = 0;
5111 int status = 0;
5112
5113 if (netif_msg_rx_status(priv)) {
5114 void *rx_head;
5115
5116 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5117 if (priv->extend_desc) {
5118 rx_head = (void *)rx_q->dma_erx;
5119 desc_size = sizeof(struct dma_extended_desc);
5120 } else {
5121 rx_head = (void *)rx_q->dma_rx;
5122 desc_size = sizeof(struct dma_desc);
5123 }
5124
5125 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5126 rx_q->dma_rx_phy, desc_size);
5127 }
5128 while (count < limit) {
5129 struct stmmac_rx_buffer *buf;
5130 struct stmmac_xdp_buff *ctx;
5131 unsigned int buf1_len = 0;
5132 struct dma_desc *np, *p;
5133 int entry;
5134 int res;
5135
5136 if (!count && rx_q->state_saved) {
5137 error = rx_q->state.error;
5138 len = rx_q->state.len;
5139 } else {
5140 rx_q->state_saved = false;
5141 error = 0;
5142 len = 0;
5143 }
5144
5145 if (count >= limit)
5146 break;
5147
5148 read_again:
5149 buf1_len = 0;
5150 entry = next_entry;
5151 buf = &rx_q->buf_pool[entry];
5152
5153 if (dirty >= STMMAC_RX_FILL_BATCH) {
5154 failure = failure ||
5155 !stmmac_rx_refill_zc(priv, queue, dirty);
5156 dirty = 0;
5157 }
5158
5159 if (priv->extend_desc)
5160 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5161 else
5162 p = rx_q->dma_rx + entry;
5163
5164 /* read the status of the incoming frame */
5165 status = stmmac_rx_status(priv, &priv->xstats, p);
5166 /* check if managed by the DMA otherwise go ahead */
5167 if (unlikely(status & dma_own))
5168 break;
5169
5170 /* Prefetch the next RX descriptor */
5171 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5172 priv->dma_conf.dma_rx_size);
5173 next_entry = rx_q->cur_rx;
5174
5175 if (priv->extend_desc)
5176 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5177 else
5178 np = rx_q->dma_rx + next_entry;
5179
5180 prefetch(np);
5181
5182 /* Ensure a valid XSK buffer before proceeding */
5183 if (!buf->xdp)
5184 break;
5185
5186 if (priv->extend_desc)
5187 stmmac_rx_extended_status(priv, &priv->xstats,
5188 rx_q->dma_erx + entry);
5189 if (unlikely(status == discard_frame)) {
5190 xsk_buff_free(buf->xdp);
5191 buf->xdp = NULL;
5192 dirty++;
5193 error = 1;
5194 if (!priv->hwts_rx_en)
5195 rx_errors++;
5196 }
5197
5198 if (unlikely(error && (status & rx_not_ls)))
5199 goto read_again;
5200 if (unlikely(error)) {
5201 count++;
5202 continue;
5203 }
5204
5205 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5206 if (likely(status & rx_not_ls)) {
5207 xsk_buff_free(buf->xdp);
5208 buf->xdp = NULL;
5209 dirty++;
5210 count++;
5211 goto read_again;
5212 }
5213
5214 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5215 ctx->priv = priv;
5216 ctx->desc = p;
5217 ctx->ndesc = np;
5218
5219 /* XDP ZC frames only support primary buffers for now */
5220 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5221 len += buf1_len;
5222
5223 /* ACS is disabled; strip manually. */
5224 if (likely(!(status & rx_not_ls))) {
5225 buf1_len -= ETH_FCS_LEN;
5226 len -= ETH_FCS_LEN;
5227 }
5228
5229 /* RX buffer is good and fits into an XSK pool buffer */
5230 buf->xdp->data_end = buf->xdp->data + buf1_len;
5231 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5232
5233 prog = READ_ONCE(priv->xdp_prog);
5234 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5235
5236 switch (res) {
5237 case STMMAC_XDP_PASS:
5238 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5239 xsk_buff_free(buf->xdp);
5240 break;
5241 case STMMAC_XDP_CONSUMED:
5242 xsk_buff_free(buf->xdp);
5243 rx_dropped++;
5244 break;
5245 case STMMAC_XDP_TX:
5246 case STMMAC_XDP_REDIRECT:
5247 xdp_status |= res;
5248 break;
5249 }
5250
5251 buf->xdp = NULL;
5252 dirty++;
5253 count++;
5254 }
5255
5256 if (status & rx_not_ls) {
5257 rx_q->state_saved = true;
5258 rx_q->state.error = error;
5259 rx_q->state.len = len;
5260 }
5261
5262 stmmac_finalize_xdp_rx(priv, xdp_status);
5263
5264 u64_stats_update_begin(&rxq_stats->napi_syncp);
5265 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5266 u64_stats_update_end(&rxq_stats->napi_syncp);
5267
5268 priv->xstats.rx_dropped += rx_dropped;
5269 priv->xstats.rx_errors += rx_errors;
5270
5271 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5272 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5273 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5274 else
5275 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5276
5277 return (int)count;
5278 }
5279
5280 return failure ? limit : (int)count;
5281 }
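
/* Illustrative note: returning the full limit when an XSK buffer refill
 * failed keeps this queue scheduled so the refill is retried on the next
 * poll. Seen from the caller, stmmac_napi_poll_rxtx() below:
 *
 *	rx_done = stmmac_rx_zc(priv, budget, chan);
 *	if (max(tx_done, rx_done) >= budget)
 *		return budget;	// stay on the poll list, IRQs stay masked
 */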
5282
5283 /**
5284 * stmmac_rx - manage the receive process
5285 * @priv: driver private structure
5286 * @limit: napi budget
5287 * @queue: RX queue index.
5288 * Description : this is the function called by the napi poll method.
5289 * It gets all the frames inside the ring.
5290 */
5291 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5292 {
5293 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5294 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5295 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5296 struct stmmac_channel *ch = &priv->channel[queue];
5297 unsigned int count = 0, error = 0, len = 0;
5298 int status = 0, coe = priv->hw->rx_csum;
5299 unsigned int next_entry = rx_q->cur_rx;
5300 enum dma_data_direction dma_dir;
5301 unsigned int desc_size;
5302 struct sk_buff *skb = NULL;
5303 struct stmmac_xdp_buff ctx;
5304 int xdp_status = 0;
5305 int buf_sz;
5306
5307 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5308 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5309 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5310
5311 if (netif_msg_rx_status(priv)) {
5312 void *rx_head;
5313
5314 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5315 if (priv->extend_desc) {
5316 rx_head = (void *)rx_q->dma_erx;
5317 desc_size = sizeof(struct dma_extended_desc);
5318 } else {
5319 rx_head = (void *)rx_q->dma_rx;
5320 desc_size = sizeof(struct dma_desc);
5321 }
5322
5323 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5324 rx_q->dma_rx_phy, desc_size);
5325 }
5326 while (count < limit) {
5327 unsigned int buf1_len = 0, buf2_len = 0;
5328 enum pkt_hash_types hash_type;
5329 struct stmmac_rx_buffer *buf;
5330 struct dma_desc *np, *p;
5331 int entry;
5332 u32 hash;
5333
5334 if (!count && rx_q->state_saved) {
5335 skb = rx_q->state.skb;
5336 error = rx_q->state.error;
5337 len = rx_q->state.len;
5338 } else {
5339 rx_q->state_saved = false;
5340 skb = NULL;
5341 error = 0;
5342 len = 0;
5343 }
5344
5345 read_again:
5346 if (count >= limit)
5347 break;
5348
5349 buf1_len = 0;
5350 buf2_len = 0;
5351 entry = next_entry;
5352 buf = &rx_q->buf_pool[entry];
5353
5354 if (priv->extend_desc)
5355 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5356 else
5357 p = rx_q->dma_rx + entry;
5358
5359 /* read the status of the incoming frame */
5360 status = stmmac_rx_status(priv, &priv->xstats, p);
5361 /* check if managed by the DMA otherwise go ahead */
5362 if (unlikely(status & dma_own))
5363 break;
5364
5365 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5366 priv->dma_conf.dma_rx_size);
5367 next_entry = rx_q->cur_rx;
5368
5369 if (priv->extend_desc)
5370 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5371 else
5372 np = rx_q->dma_rx + next_entry;
5373
5374 prefetch(np);
5375
5376 if (priv->extend_desc)
5377 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5378 if (unlikely(status == discard_frame)) {
5379 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5380 buf->page = NULL;
5381 error = 1;
5382 if (!priv->hwts_rx_en)
5383 rx_errors++;
5384 }
5385
5386 if (unlikely(error && (status & rx_not_ls)))
5387 goto read_again;
5388 if (unlikely(error)) {
5389 dev_kfree_skb(skb);
5390 skb = NULL;
5391 count++;
5392 continue;
5393 }
5394
5395 /* Buffer is good. Go on. */
5396
5397 prefetch(page_address(buf->page) + buf->page_offset);
5398 if (buf->sec_page)
5399 prefetch(page_address(buf->sec_page));
5400
5401 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5402 len += buf1_len;
5403 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5404 len += buf2_len;
5405
5406 /* ACS is disabled; strip manually. */
5407 if (likely(!(status & rx_not_ls))) {
5408 if (buf2_len) {
5409 buf2_len -= ETH_FCS_LEN;
5410 len -= ETH_FCS_LEN;
5411 } else if (buf1_len) {
5412 buf1_len -= ETH_FCS_LEN;
5413 len -= ETH_FCS_LEN;
5414 }
5415 }
5416
5417 if (!skb) {
5418 unsigned int pre_len, sync_len;
5419
5420 dma_sync_single_for_cpu(priv->device, buf->addr,
5421 buf1_len, dma_dir);
5422
5423 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5424 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5425 buf->page_offset, buf1_len, true);
5426
5427 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5428 buf->page_offset;
5429
5430 ctx.priv = priv;
5431 ctx.desc = p;
5432 ctx.ndesc = np;
5433
5434 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5435 /* Due to xdp_adjust_tail, the DMA sync for_device must
5436 * cover the maximum length the CPU touched
5437 */
5438 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5439 buf->page_offset;
5440 sync_len = max(sync_len, pre_len);
5441
5442 /* For Not XDP_PASS verdict */
5443 if (IS_ERR(skb)) {
5444 unsigned int xdp_res = -PTR_ERR(skb);
5445
5446 if (xdp_res & STMMAC_XDP_CONSUMED) {
5447 page_pool_put_page(rx_q->page_pool,
5448 virt_to_head_page(ctx.xdp.data),
5449 sync_len, true);
5450 buf->page = NULL;
5451 rx_dropped++;
5452
5453 /* Clear skb as it was set as
5454 * status by the XDP program.
5455 */
5456 skb = NULL;
5457
5458 if (unlikely((status & rx_not_ls)))
5459 goto read_again;
5460
5461 count++;
5462 continue;
5463 } else if (xdp_res & (STMMAC_XDP_TX |
5464 STMMAC_XDP_REDIRECT)) {
5465 xdp_status |= xdp_res;
5466 buf->page = NULL;
5467 skb = NULL;
5468 count++;
5469 continue;
5470 }
5471 }
5472 }
5473
5474 if (!skb) {
5475 /* XDP program may expand or reduce tail */
5476 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5477
5478 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5479 if (!skb) {
5480 rx_dropped++;
5481 count++;
5482 goto drain_data;
5483 }
5484
5485 /* XDP program may adjust header */
5486 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5487 skb_put(skb, buf1_len);
5488
5489 /* Data payload copied into SKB, page ready for recycle */
5490 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5491 buf->page = NULL;
5492 } else if (buf1_len) {
5493 dma_sync_single_for_cpu(priv->device, buf->addr,
5494 buf1_len, dma_dir);
5495 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5496 buf->page, buf->page_offset, buf1_len,
5497 priv->dma_conf.dma_buf_sz);
5498
5499 /* Data payload appended into SKB */
5500 skb_mark_for_recycle(skb);
5501 buf->page = NULL;
5502 }
5503
5504 if (buf2_len) {
5505 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5506 buf2_len, dma_dir);
5507 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5508 buf->sec_page, 0, buf2_len,
5509 priv->dma_conf.dma_buf_sz);
5510
5511 /* Data payload appended into SKB */
5512 skb_mark_for_recycle(skb);
5513 buf->sec_page = NULL;
5514 }
5515
5516 drain_data:
5517 if (likely(status & rx_not_ls))
5518 goto read_again;
5519 if (!skb)
5520 continue;
5521
5522 /* Got entire packet into SKB. Finish it. */
5523
5524 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5525 stmmac_rx_vlan(priv->dev, skb);
5526 skb->protocol = eth_type_trans(skb, priv->dev);
5527
5528 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5529 skb_checksum_none_assert(skb);
5530 else
5531 skb->ip_summed = CHECKSUM_UNNECESSARY;
5532
5533 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5534 skb_set_hash(skb, hash, hash_type);
5535
5536 skb_record_rx_queue(skb, queue);
5537 napi_gro_receive(&ch->rx_napi, skb);
5538 skb = NULL;
5539
5540 rx_packets++;
5541 rx_bytes += len;
5542 count++;
5543 }
5544
5545 if (status & rx_not_ls || skb) {
5546 rx_q->state_saved = true;
5547 rx_q->state.skb = skb;
5548 rx_q->state.error = error;
5549 rx_q->state.len = len;
5550 }
5551
5552 stmmac_finalize_xdp_rx(priv, xdp_status);
5553
5554 stmmac_rx_refill(priv, queue);
5555
5556 u64_stats_update_begin(&rxq_stats->napi_syncp);
5557 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5558 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5559 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5560 u64_stats_update_end(&rxq_stats->napi_syncp);
5561
5562 priv->xstats.rx_dropped += rx_dropped;
5563 priv->xstats.rx_errors += rx_errors;
5564
5565 return count;
5566 }
5567
5568 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5569 {
5570 struct stmmac_channel *ch =
5571 container_of(napi, struct stmmac_channel, rx_napi);
5572 struct stmmac_priv *priv = ch->priv_data;
5573 struct stmmac_rxq_stats *rxq_stats;
5574 u32 chan = ch->index;
5575 int work_done;
5576
5577 rxq_stats = &priv->xstats.rxq_stats[chan];
5578 u64_stats_update_begin(&rxq_stats->napi_syncp);
5579 u64_stats_inc(&rxq_stats->napi.poll);
5580 u64_stats_update_end(&rxq_stats->napi_syncp);
5581
5582 work_done = stmmac_rx(priv, budget, chan);
5583 if (work_done < budget && napi_complete_done(napi, work_done)) {
5584 unsigned long flags;
5585
5586 spin_lock_irqsave(&ch->lock, flags);
5587 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5588 spin_unlock_irqrestore(&ch->lock, flags);
5589 }
5590
5591 return work_done;
5592 }
5593
5594 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5595 {
5596 struct stmmac_channel *ch =
5597 container_of(napi, struct stmmac_channel, tx_napi);
5598 struct stmmac_priv *priv = ch->priv_data;
5599 struct stmmac_txq_stats *txq_stats;
5600 u32 chan = ch->index;
5601 int work_done;
5602
5603 txq_stats = &priv->xstats.txq_stats[chan];
5604 u64_stats_update_begin(&txq_stats->napi_syncp);
5605 u64_stats_inc(&txq_stats->napi.poll);
5606 u64_stats_update_end(&txq_stats->napi_syncp);
5607
5608 work_done = stmmac_tx_clean(priv, budget, chan);
5609 work_done = min(work_done, budget);
5610
5611 if (work_done < budget && napi_complete_done(napi, work_done)) {
5612 unsigned long flags;
5613
5614 spin_lock_irqsave(&ch->lock, flags);
5615 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5616 spin_unlock_irqrestore(&ch->lock, flags);
5617 }
5618
5619 return work_done;
5620 }
5621
5622 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5623 {
5624 struct stmmac_channel *ch =
5625 container_of(napi, struct stmmac_channel, rxtx_napi);
5626 struct stmmac_priv *priv = ch->priv_data;
5627 int rx_done, tx_done, rxtx_done;
5628 struct stmmac_rxq_stats *rxq_stats;
5629 struct stmmac_txq_stats *txq_stats;
5630 u32 chan = ch->index;
5631
5632 rxq_stats = &priv->xstats.rxq_stats[chan];
5633 u64_stats_update_begin(&rxq_stats->napi_syncp);
5634 u64_stats_inc(&rxq_stats->napi.poll);
5635 u64_stats_update_end(&rxq_stats->napi_syncp);
5636
5637 txq_stats = &priv->xstats.txq_stats[chan];
5638 u64_stats_update_begin(&txq_stats->napi_syncp);
5639 u64_stats_inc(&txq_stats->napi.poll);
5640 u64_stats_update_end(&txq_stats->napi_syncp);
5641
5642 tx_done = stmmac_tx_clean(priv, budget, chan);
5643 tx_done = min(tx_done, budget);
5644
5645 rx_done = stmmac_rx_zc(priv, budget, chan);
5646
5647 rxtx_done = max(tx_done, rx_done);
5648
5649 /* If either TX or RX work is not complete, return budget
5650 * and keep polling
5651 */
5652 if (rxtx_done >= budget)
5653 return budget;
5654
5655 /* all work done, exit the polling mode */
5656 if (napi_complete_done(napi, rxtx_done)) {
5657 unsigned long flags;
5658
5659 spin_lock_irqsave(&ch->lock, flags);
5660 /* Both RX and TX work are complete,
5661 * so enable both RX & TX IRQs.
5662 */
5663 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5664 spin_unlock_irqrestore(&ch->lock, flags);
5665 }
5666
5667 return min(rxtx_done, budget - 1);
5668 }
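
/* Informal recap of the NAPI contract followed by the three poll handlers
 * above (generic sketch with hypothetical helper names): a handler that
 * consumed its whole budget returns budget without completing NAPI, while
 * one that finished early completes NAPI and re-enables the DMA interrupt:
 *
 *	done = clean_ring(budget);
 *	if (done < budget && napi_complete_done(napi, done))
 *		reenable_dma_irq();
 *	return done;
 *
 * The combined RX/TX handler additionally clamps its return value to
 * budget - 1 as a defensive measure, so it never reports a full budget
 * after having completed NAPI.
 */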
5669
5670 /**
5671 * stmmac_tx_timeout
5672 * @dev : Pointer to net device structure
5673 * @txqueue: the index of the hanging transmit queue
5674 * Description: this function is called when a packet transmission fails to
5675 * complete within a reasonable time. The driver will mark the error in the
5676 * netdev structure and arrange for the device to be reset to a sane state
5677 * in order to transmit a new packet.
5678 */
5679 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5680 {
5681 struct stmmac_priv *priv = netdev_priv(dev);
5682
5683 stmmac_global_err(priv);
5684 }
5685
5686 /**
5687 * stmmac_set_rx_mode - entry point for multicast addressing
5688 * @dev : pointer to the device structure
5689 * Description:
5690 * This function is a driver entry point which gets called by the kernel
5691 * whenever multicast addresses must be enabled/disabled.
5692 * Return value:
5693 * void.
5694 */
5695 static void stmmac_set_rx_mode(struct net_device *dev)
5696 {
5697 struct stmmac_priv *priv = netdev_priv(dev);
5698
5699 stmmac_set_filter(priv, priv->hw, dev);
5700 }
5701
5702 /**
5703 * stmmac_change_mtu - entry point to change MTU size for the device.
5704 * @dev : device pointer.
5705 * @new_mtu : the new MTU size for the device.
5706 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5707 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5708 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5709 * Return value:
5710 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5711 * file on failure.
5712 */
5713 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5714 {
5715 struct stmmac_priv *priv = netdev_priv(dev);
5716 int txfifosz = priv->plat->tx_fifo_size;
5717 struct stmmac_dma_conf *dma_conf;
5718 const int mtu = new_mtu;
5719 int ret;
5720
5721 if (txfifosz == 0)
5722 txfifosz = priv->dma_cap.tx_fifo_size;
5723
5724 txfifosz /= priv->plat->tx_queues_to_use;
5725
5726 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5727 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5728 return -EINVAL;
5729 }
5730
5731 new_mtu = STMMAC_ALIGN(new_mtu);
5732
5733 /* Reject if the per-queue TX FIFO is too small or the MTU is too large */
5734 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5735 return -EINVAL;
5736
5737 if (netif_running(dev)) {
5738 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5739 /* Try to allocate the new DMA conf with the new mtu */
5740 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5741 if (IS_ERR(dma_conf)) {
5742 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5743 mtu);
5744 return PTR_ERR(dma_conf);
5745 }
5746
5747 stmmac_release(dev);
5748
5749 ret = __stmmac_open(dev, dma_conf);
5750 if (ret) {
5751 free_dma_desc_resources(priv, dma_conf);
5752 kfree(dma_conf);
5753 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5754 return ret;
5755 }
5756
5757 kfree(dma_conf);
5758
5759 stmmac_set_rx_mode(dev);
5760 }
5761
5762 dev->mtu = mtu;
5763 netdev_update_features(dev);
5764
5765 return 0;
5766 }
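
/* Worked example for the FIFO/MTU check in stmmac_change_mtu() above
 * (hypothetical numbers, assuming a 64-byte cache line): with a 4 KiB TX
 * FIFO shared by two TX queues, txfifosz is 2048 bytes per queue. A
 * requested MTU of 3000 is rounded up by STMMAC_ALIGN() to 3008, which
 * exceeds the per-queue FIFO, so the request is rejected:
 *
 *	txfifosz = 4096 / 2;			// 2048
 *	new_mtu  = STMMAC_ALIGN(3000);		// 3008
 *	if (txfifosz < new_mtu)			// true -> -EINVAL
 */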
5767
5768 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5769 netdev_features_t features)
5770 {
5771 struct stmmac_priv *priv = netdev_priv(dev);
5772
5773 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5774 features &= ~NETIF_F_RXCSUM;
5775
5776 if (!priv->plat->tx_coe)
5777 features &= ~NETIF_F_CSUM_MASK;
5778
5779 /* Some GMAC devices have buggy Jumbo frame support that
5780 * needs to have the Tx COE disabled for oversized frames
5781 * (due to limited buffer sizes). In this case we disable
5782 * the TX csum insertion in the TDES and do not use SF.
5783 */
5784 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5785 features &= ~NETIF_F_CSUM_MASK;
5786
5787 /* Disable tso if asked by ethtool */
5788 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5789 if (features & NETIF_F_TSO)
5790 priv->tso = true;
5791 else
5792 priv->tso = false;
5793 }
5794
5795 return features;
5796 }
5797
5798 static int stmmac_set_features(struct net_device *netdev,
5799 netdev_features_t features)
5800 {
5801 struct stmmac_priv *priv = netdev_priv(netdev);
5802
5803 /* Keep the COE type if checksum offload is supported */
5804 if (features & NETIF_F_RXCSUM)
5805 priv->hw->rx_csum = priv->plat->rx_coe;
5806 else
5807 priv->hw->rx_csum = 0;
5808 /* No check needed because rx_coe has been set before and it will be
5809 * fixed in case of issue.
5810 */
5811 stmmac_rx_ipc(priv, priv->hw);
5812
5813 if (priv->sph_cap) {
5814 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5815 u32 chan;
5816
5817 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5818 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5819 }
5820
5821 return 0;
5822 }
5823
5824 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5825 {
5826 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5827 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5828 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5829 bool *hs_enable = &fpe_cfg->hs_enable;
5830
5831 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5832 return;
5833
5834 /* If LP has sent verify mPacket, LP is FPE capable */
5835 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5836 if (*lp_state < FPE_STATE_CAPABLE)
5837 *lp_state = FPE_STATE_CAPABLE;
5838
5839 /* If the user has requested FPE enable, respond quickly */
5840 if (*hs_enable)
5841 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5842 fpe_cfg,
5843 MPACKET_RESPONSE);
5844 }
5845
5846 /* If Local has sent verify mPacket, Local is FPE capable */
5847 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5848 if (*lo_state < FPE_STATE_CAPABLE)
5849 *lo_state = FPE_STATE_CAPABLE;
5850 }
5851
5852 /* If LP has sent response mPacket, LP is entering FPE ON */
5853 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5854 *lp_state = FPE_STATE_ENTERING_ON;
5855
5856 /* If Local has sent response mPacket, Local is entering FPE ON */
5857 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5858 *lo_state = FPE_STATE_ENTERING_ON;
5859
5860 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5861 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5862 priv->fpe_wq) {
5863 queue_work(priv->fpe_wq, &priv->fpe_task);
5864 }
5865 }
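
/* Informal summary of the FPE verify handshake handled above, derived
 * from the event handling itself (state and event names are the driver's
 * own):
 *
 *	FPE_EVENT_TVER: local sent verify   -> lo_fpe_state = FPE_STATE_CAPABLE
 *	FPE_EVENT_RVER: peer sent verify    -> lp_fpe_state = FPE_STATE_CAPABLE,
 *					       reply with MPACKET_RESPONSE
 *	FPE_EVENT_RRSP: peer sent response  -> lp_fpe_state = FPE_STATE_ENTERING_ON
 *	FPE_EVENT_TRSP: local sent response -> lo_fpe_state = FPE_STATE_ENTERING_ON
 *
 * The fpe_task work item scheduled at the end then completes the
 * transition to FPE ON.
 */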
5866
5867 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5868 {
5869 u32 rx_cnt = priv->plat->rx_queues_to_use;
5870 u32 tx_cnt = priv->plat->tx_queues_to_use;
5871 u32 queues_count;
5872 u32 queue;
5873 bool xmac;
5874
5875 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5876 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5877
5878 if (priv->irq_wake)
5879 pm_wakeup_event(priv->device, 0);
5880
5881 if (priv->dma_cap.estsel)
5882 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5883 &priv->xstats, tx_cnt);
5884
5885 if (priv->dma_cap.fpesel) {
5886 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5887 priv->dev);
5888
5889 stmmac_fpe_event_status(priv, status);
5890 }
5891
5892 /* To handle the GMAC's own interrupts */
5893 if ((priv->plat->has_gmac) || xmac) {
5894 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5895
5896 if (unlikely(status)) {
5897 /* For LPI we need to save the tx status */
5898 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5899 priv->tx_path_in_lpi_mode = true;
5900 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5901 priv->tx_path_in_lpi_mode = false;
5902 }
5903
5904 for (queue = 0; queue < queues_count; queue++) {
5905 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5906 queue);
5907 }
5908
5909 /* PCS link status */
5910 if (priv->hw->pcs &&
5911 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5912 if (priv->xstats.pcs_link)
5913 netif_carrier_on(priv->dev);
5914 else
5915 netif_carrier_off(priv->dev);
5916 }
5917
5918 stmmac_timestamp_interrupt(priv, priv);
5919 }
5920 }
5921
5922 /**
5923 * stmmac_interrupt - main ISR
5924 * @irq: interrupt number.
5925 * @dev_id: to pass the net device pointer.
5926 * Description: this is the main driver interrupt service routine.
5927 * It can call:
5928 * o DMA service routine (to manage incoming frame reception and transmission
5929 * status)
5930 * o Core interrupts to manage: remote wake-up, management counter, LPI
5931 * interrupts.
5932 */
5933 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5934 {
5935 struct net_device *dev = (struct net_device *)dev_id;
5936 struct stmmac_priv *priv = netdev_priv(dev);
5937
5938 /* Check if adapter is up */
5939 if (test_bit(STMMAC_DOWN, &priv->state))
5940 return IRQ_HANDLED;
5941
5942 /* Check if a fatal error happened */
5943 if (stmmac_safety_feat_interrupt(priv))
5944 return IRQ_HANDLED;
5945
5946 /* To handle Common interrupts */
5947 stmmac_common_interrupt(priv);
5948
5949 /* To handle DMA interrupts */
5950 stmmac_dma_interrupt(priv);
5951
5952 return IRQ_HANDLED;
5953 }
5954
5955 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5956 {
5957 struct net_device *dev = (struct net_device *)dev_id;
5958 struct stmmac_priv *priv = netdev_priv(dev);
5959
5960 /* Check if adapter is up */
5961 if (test_bit(STMMAC_DOWN, &priv->state))
5962 return IRQ_HANDLED;
5963
5964 /* To handle Common interrupts */
5965 stmmac_common_interrupt(priv);
5966
5967 return IRQ_HANDLED;
5968 }
5969
5970 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5971 {
5972 struct net_device *dev = (struct net_device *)dev_id;
5973 struct stmmac_priv *priv = netdev_priv(dev);
5974
5975 /* Check if adapter is up */
5976 if (test_bit(STMMAC_DOWN, &priv->state))
5977 return IRQ_HANDLED;
5978
5979 /* Check if a fatal error happened */
5980 stmmac_safety_feat_interrupt(priv);
5981
5982 return IRQ_HANDLED;
5983 }
5984
5985 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5986 {
5987 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5988 struct stmmac_dma_conf *dma_conf;
5989 int chan = tx_q->queue_index;
5990 struct stmmac_priv *priv;
5991 int status;
5992
5993 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5994 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5995
5996 /* Check if adapter is up */
5997 if (test_bit(STMMAC_DOWN, &priv->state))
5998 return IRQ_HANDLED;
5999
6000 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6001
6002 if (unlikely(status & tx_hard_error_bump_tc)) {
6003 /* Try to bump up the dma threshold on this failure */
6004 stmmac_bump_dma_threshold(priv, chan);
6005 } else if (unlikely(status == tx_hard_error)) {
6006 stmmac_tx_err(priv, chan);
6007 }
6008
6009 return IRQ_HANDLED;
6010 }
6011
6012 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6013 {
6014 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6015 struct stmmac_dma_conf *dma_conf;
6016 int chan = rx_q->queue_index;
6017 struct stmmac_priv *priv;
6018
6019 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6020 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6021
6022 /* Check if adapter is up */
6023 if (test_bit(STMMAC_DOWN, &priv->state))
6024 return IRQ_HANDLED;
6025
6026 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6027
6028 return IRQ_HANDLED;
6029 }
6030
6031 /**
6032 * stmmac_ioctl - Entry point for the Ioctl
6033 * @dev: Device pointer.
6034 * @rq: An IOCTL-specific structure that can contain a pointer to
6035 * a proprietary structure used to pass information to the driver.
6036 * @cmd: IOCTL command
6037 * Description:
6038 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6039 */
6040 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6041 {
6042 struct stmmac_priv *priv = netdev_priv(dev);
6043 int ret = -EOPNOTSUPP;
6044
6045 if (!netif_running(dev))
6046 return -EINVAL;
6047
6048 switch (cmd) {
6049 case SIOCGMIIPHY:
6050 case SIOCGMIIREG:
6051 case SIOCSMIIREG:
6052 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6053 break;
6054 case SIOCSHWTSTAMP:
6055 ret = stmmac_hwtstamp_set(dev, rq);
6056 break;
6057 case SIOCGHWTSTAMP:
6058 ret = stmmac_hwtstamp_get(dev, rq);
6059 break;
6060 default:
6061 break;
6062 }
6063
6064 return ret;
6065 }
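
/* Usage sketch (userspace side, not part of the driver): hardware
 * timestamping is typically requested through SIOCSHWTSTAMP with a
 * struct hwtstamp_config from <linux/net_tstamp.h>. "eth0", sock_fd and
 * the chosen filter values below are placeholders:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */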
6066
6067 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6068 void *cb_priv)
6069 {
6070 struct stmmac_priv *priv = cb_priv;
6071 int ret = -EOPNOTSUPP;
6072
6073 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6074 return ret;
6075
6076 __stmmac_disable_all_queues(priv);
6077
6078 switch (type) {
6079 case TC_SETUP_CLSU32:
6080 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6081 break;
6082 case TC_SETUP_CLSFLOWER:
6083 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6084 break;
6085 default:
6086 break;
6087 }
6088
6089 stmmac_enable_all_queues(priv);
6090 return ret;
6091 }
6092
6093 static LIST_HEAD(stmmac_block_cb_list);
6094
6095 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6096 void *type_data)
6097 {
6098 struct stmmac_priv *priv = netdev_priv(ndev);
6099
6100 switch (type) {
6101 case TC_QUERY_CAPS:
6102 return stmmac_tc_query_caps(priv, priv, type_data);
6103 case TC_SETUP_BLOCK:
6104 return flow_block_cb_setup_simple(type_data,
6105 &stmmac_block_cb_list,
6106 stmmac_setup_tc_block_cb,
6107 priv, priv, true);
6108 case TC_SETUP_QDISC_CBS:
6109 return stmmac_tc_setup_cbs(priv, priv, type_data);
6110 case TC_SETUP_QDISC_TAPRIO:
6111 return stmmac_tc_setup_taprio(priv, priv, type_data);
6112 case TC_SETUP_QDISC_ETF:
6113 return stmmac_tc_setup_etf(priv, priv, type_data);
6114 default:
6115 return -EOPNOTSUPP;
6116 }
6117 }
6118
6119 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6120 struct net_device *sb_dev)
6121 {
6122 int gso = skb_shinfo(skb)->gso_type;
6123
6124 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6125 /*
6126 * There is no way to determine the number of TSO/USO
6127 * capable Queues. Let's always use Queue 0
6128 * because if TSO/USO is supported then at least this
6129 * one will be capable.
6130 */
6131 return 0;
6132 }
6133
6134 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6135 }
6136
6137 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6138 {
6139 struct stmmac_priv *priv = netdev_priv(ndev);
6140 int ret = 0;
6141
6142 ret = pm_runtime_resume_and_get(priv->device);
6143 if (ret < 0)
6144 return ret;
6145
6146 ret = eth_mac_addr(ndev, addr);
6147 if (ret)
6148 goto set_mac_error;
6149
6150 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6151
6152 set_mac_error:
6153 pm_runtime_put(priv->device);
6154
6155 return ret;
6156 }
6157
6158 #ifdef CONFIG_DEBUG_FS
6159 static struct dentry *stmmac_fs_dir;
6160
6161 static void sysfs_display_ring(void *head, int size, int extend_desc,
6162 struct seq_file *seq, dma_addr_t dma_phy_addr)
6163 {
6164 int i;
6165 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6166 struct dma_desc *p = (struct dma_desc *)head;
6167 dma_addr_t dma_addr;
6168
6169 for (i = 0; i < size; i++) {
6170 if (extend_desc) {
6171 dma_addr = dma_phy_addr + i * sizeof(*ep);
6172 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6173 i, &dma_addr,
6174 le32_to_cpu(ep->basic.des0),
6175 le32_to_cpu(ep->basic.des1),
6176 le32_to_cpu(ep->basic.des2),
6177 le32_to_cpu(ep->basic.des3));
6178 ep++;
6179 } else {
6180 dma_addr = dma_phy_addr + i * sizeof(*p);
6181 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6182 i, &dma_addr,
6183 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6184 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6185 p++;
6186 }
6187 seq_printf(seq, "\n");
6188 }
6189 }
6190
6191 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6192 {
6193 struct net_device *dev = seq->private;
6194 struct stmmac_priv *priv = netdev_priv(dev);
6195 u32 rx_count = priv->plat->rx_queues_to_use;
6196 u32 tx_count = priv->plat->tx_queues_to_use;
6197 u32 queue;
6198
6199 if ((dev->flags & IFF_UP) == 0)
6200 return 0;
6201
6202 for (queue = 0; queue < rx_count; queue++) {
6203 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6204
6205 seq_printf(seq, "RX Queue %d:\n", queue);
6206
6207 if (priv->extend_desc) {
6208 seq_printf(seq, "Extended descriptor ring:\n");
6209 sysfs_display_ring((void *)rx_q->dma_erx,
6210 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6211 } else {
6212 seq_printf(seq, "Descriptor ring:\n");
6213 sysfs_display_ring((void *)rx_q->dma_rx,
6214 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6215 }
6216 }
6217
6218 for (queue = 0; queue < tx_count; queue++) {
6219 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6220
6221 seq_printf(seq, "TX Queue %d:\n", queue);
6222
6223 if (priv->extend_desc) {
6224 seq_printf(seq, "Extended descriptor ring:\n");
6225 sysfs_display_ring((void *)tx_q->dma_etx,
6226 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6227 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6228 seq_printf(seq, "Descriptor ring:\n");
6229 sysfs_display_ring((void *)tx_q->dma_tx,
6230 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6231 }
6232 }
6233
6234 return 0;
6235 }
6236 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6237
6238 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6239 {
6240 static const char * const dwxgmac_timestamp_source[] = {
6241 "None",
6242 "Internal",
6243 "External",
6244 "Both",
6245 };
6246 static const char * const dwxgmac_safety_feature_desc[] = {
6247 "No",
6248 "All Safety Features with ECC and Parity",
6249 "All Safety Features without ECC or Parity",
6250 "All Safety Features with Parity Only",
6251 "ECC Only",
6252 "UNDEFINED",
6253 "UNDEFINED",
6254 "UNDEFINED",
6255 };
6256 struct net_device *dev = seq->private;
6257 struct stmmac_priv *priv = netdev_priv(dev);
6258
6259 if (!priv->hw_cap_support) {
6260 seq_printf(seq, "DMA HW features not supported\n");
6261 return 0;
6262 }
6263
6264 seq_printf(seq, "==============================\n");
6265 seq_printf(seq, "\tDMA HW features\n");
6266 seq_printf(seq, "==============================\n");
6267
6268 seq_printf(seq, "\t10/100 Mbps: %s\n",
6269 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6270 seq_printf(seq, "\t1000 Mbps: %s\n",
6271 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6272 seq_printf(seq, "\tHalf duplex: %s\n",
6273 (priv->dma_cap.half_duplex) ? "Y" : "N");
6274 if (priv->plat->has_xgmac) {
6275 seq_printf(seq,
6276 "\tNumber of Additional MAC address registers: %d\n",
6277 priv->dma_cap.multi_addr);
6278 } else {
6279 seq_printf(seq, "\tHash Filter: %s\n",
6280 (priv->dma_cap.hash_filter) ? "Y" : "N");
6281 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6282 (priv->dma_cap.multi_addr) ? "Y" : "N");
6283 }
6284 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6285 (priv->dma_cap.pcs) ? "Y" : "N");
6286 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6287 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6288 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6289 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6290 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6291 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6292 seq_printf(seq, "\tRMON module: %s\n",
6293 (priv->dma_cap.rmon) ? "Y" : "N");
6294 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6295 (priv->dma_cap.time_stamp) ? "Y" : "N");
6296 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6297 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6298 if (priv->plat->has_xgmac)
6299 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6300 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6301 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6302 (priv->dma_cap.eee) ? "Y" : "N");
6303 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6304 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6305 (priv->dma_cap.tx_coe) ? "Y" : "N");
6306 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6307 priv->plat->has_xgmac) {
6308 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6309 (priv->dma_cap.rx_coe) ? "Y" : "N");
6310 } else {
6311 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6312 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6313 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6314 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6315 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6316 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6317 }
6318 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6319 priv->dma_cap.number_rx_channel);
6320 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6321 priv->dma_cap.number_tx_channel);
6322 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6323 priv->dma_cap.number_rx_queues);
6324 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6325 priv->dma_cap.number_tx_queues);
6326 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6327 (priv->dma_cap.enh_desc) ? "Y" : "N");
6328 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6329 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6330 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6331 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6332 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6333 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6334 priv->dma_cap.pps_out_num);
6335 seq_printf(seq, "\tSafety Features: %s\n",
6336 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6337 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6338 priv->dma_cap.frpsel ? "Y" : "N");
6339 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6340 priv->dma_cap.host_dma_width);
6341 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6342 priv->dma_cap.rssen ? "Y" : "N");
6343 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6344 priv->dma_cap.vlhash ? "Y" : "N");
6345 seq_printf(seq, "\tSplit Header: %s\n",
6346 priv->dma_cap.sphen ? "Y" : "N");
6347 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6348 priv->dma_cap.vlins ? "Y" : "N");
6349 seq_printf(seq, "\tDouble VLAN: %s\n",
6350 priv->dma_cap.dvlan ? "Y" : "N");
6351 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6352 priv->dma_cap.l3l4fnum);
6353 seq_printf(seq, "\tARP Offloading: %s\n",
6354 priv->dma_cap.arpoffsel ? "Y" : "N");
6355 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6356 priv->dma_cap.estsel ? "Y" : "N");
6357 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6358 priv->dma_cap.fpesel ? "Y" : "N");
6359 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6360 priv->dma_cap.tbssel ? "Y" : "N");
6361 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6362 priv->dma_cap.tbs_ch_num);
6363 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6364 priv->dma_cap.sgfsel ? "Y" : "N");
6365 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6366 BIT(priv->dma_cap.ttsfd) >> 1);
6367 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6368 priv->dma_cap.numtc);
6369 seq_printf(seq, "\tDCB Feature: %s\n",
6370 priv->dma_cap.dcben ? "Y" : "N");
6371 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6372 priv->dma_cap.advthword ? "Y" : "N");
6373 seq_printf(seq, "\tPTP Offload: %s\n",
6374 priv->dma_cap.ptoen ? "Y" : "N");
6375 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6376 priv->dma_cap.osten ? "Y" : "N");
6377 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6378 priv->dma_cap.pfcen ? "Y" : "N");
6379 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6380 BIT(priv->dma_cap.frpes) << 6);
6381 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6382 BIT(priv->dma_cap.frpbs) << 6);
6383 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6384 priv->dma_cap.frppipe_num);
6385 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6386 priv->dma_cap.nrvf_num ?
6387 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6388 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6389 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6390 seq_printf(seq, "\tDepth of GCL: %lu\n",
6391 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6392 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6393 priv->dma_cap.cbtisel ? "Y" : "N");
6394 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6395 priv->dma_cap.aux_snapshot_n);
6396 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6397 priv->dma_cap.pou_ost_en ? "Y" : "N");
6398 seq_printf(seq, "\tEnhanced DMA: %s\n",
6399 priv->dma_cap.edma ? "Y" : "N");
6400 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6401 priv->dma_cap.ediffc ? "Y" : "N");
6402 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6403 priv->dma_cap.vxn ? "Y" : "N");
6404 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6405 priv->dma_cap.dbgmem ? "Y" : "N");
6406 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6407 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6408 return 0;
6409 }
6410 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6411
6412 /* Use network device events to rename debugfs file entries.
6413 */
6414 static int stmmac_device_event(struct notifier_block *unused,
6415 unsigned long event, void *ptr)
6416 {
6417 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6418 struct stmmac_priv *priv = netdev_priv(dev);
6419
6420 if (dev->netdev_ops != &stmmac_netdev_ops)
6421 goto done;
6422
6423 switch (event) {
6424 case NETDEV_CHANGENAME:
6425 if (priv->dbgfs_dir)
6426 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6427 priv->dbgfs_dir,
6428 stmmac_fs_dir,
6429 dev->name);
6430 break;
6431 }
6432 done:
6433 return NOTIFY_DONE;
6434 }
6435
6436 static struct notifier_block stmmac_notifier = {
6437 .notifier_call = stmmac_device_event,
6438 };
6439
6440 static void stmmac_init_fs(struct net_device *dev)
6441 {
6442 struct stmmac_priv *priv = netdev_priv(dev);
6443
6444 rtnl_lock();
6445
6446 /* Create per netdev entries */
6447 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6448
6449 /* Entry to report DMA RX/TX rings */
6450 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6451 &stmmac_rings_status_fops);
6452
6453 /* Entry to report the DMA HW features */
6454 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6455 &stmmac_dma_cap_fops);
6456
6457 rtnl_unlock();
6458 }
6459
6460 static void stmmac_exit_fs(struct net_device *dev)
6461 {
6462 struct stmmac_priv *priv = netdev_priv(dev);
6463
6464 debugfs_remove_recursive(priv->dbgfs_dir);
6465 }
6466 #endif /* CONFIG_DEBUG_FS */
6467
6468 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6469 {
6470 unsigned char *data = (unsigned char *)&vid_le;
6471 unsigned char data_byte = 0;
6472 u32 crc = ~0x0;
6473 u32 temp = 0;
6474 int i, bits;
6475
6476 bits = get_bitmask_order(VLAN_VID_MASK);
6477 for (i = 0; i < bits; i++) {
6478 if ((i % 8) == 0)
6479 data_byte = data[i / 8];
6480
6481 temp = ((crc & 1) ^ data_byte) & 1;
6482 crc >>= 1;
6483 data_byte >>= 1;
6484
6485 if (temp)
6486 crc ^= 0xedb88320;
6487 }
6488
6489 return crc;
6490 }
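
/* Illustrative note: the loop above computes the little-endian CRC-32
 * (polynomial 0xedb88320) over the 12 valid VID bits. The VLAN hash
 * filter then uses the top four bits of the bit-reversed complement as
 * the hash-table bit index, as done in stmmac_vlan_update() below:
 *
 *	crc   = bitrev32(~stmmac_vid_crc32_le(cpu_to_le16(vid))) >> 28;
 *	hash |= (1 << crc);	// selects one of 16 hash-table bits
 */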
6491
6492 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6493 {
6494 u32 crc, hash = 0;
6495 u16 pmatch = 0;
6496 int count = 0;
6497 u16 vid = 0;
6498
6499 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6500 __le16 vid_le = cpu_to_le16(vid);
6501 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6502 hash |= (1 << crc);
6503 count++;
6504 }
6505
6506 if (!priv->dma_cap.vlhash) {
6507 if (count > 2) /* VID = 0 always passes filter */
6508 return -EOPNOTSUPP;
6509
6510 pmatch = vid;
6511 hash = 0;
6512 }
6513
6514 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6515 }
6516
6517 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6518 {
6519 struct stmmac_priv *priv = netdev_priv(ndev);
6520 bool is_double = false;
6521 int ret;
6522
6523 ret = pm_runtime_resume_and_get(priv->device);
6524 if (ret < 0)
6525 return ret;
6526
6527 if (be16_to_cpu(proto) == ETH_P_8021AD)
6528 is_double = true;
6529
6530 set_bit(vid, priv->active_vlans);
6531 ret = stmmac_vlan_update(priv, is_double);
6532 if (ret) {
6533 clear_bit(vid, priv->active_vlans);
6534 goto err_pm_put;
6535 }
6536
6537 if (priv->hw->num_vlan) {
6538 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6539 if (ret)
6540 goto err_pm_put;
6541 }
6542 err_pm_put:
6543 pm_runtime_put(priv->device);
6544
6545 return ret;
6546 }
6547
6548 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6549 {
6550 struct stmmac_priv *priv = netdev_priv(ndev);
6551 bool is_double = false;
6552 int ret;
6553
6554 ret = pm_runtime_resume_and_get(priv->device);
6555 if (ret < 0)
6556 return ret;
6557
6558 if (be16_to_cpu(proto) == ETH_P_8021AD)
6559 is_double = true;
6560
6561 clear_bit(vid, priv->active_vlans);
6562
6563 if (priv->hw->num_vlan) {
6564 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6565 if (ret)
6566 goto del_vlan_error;
6567 }
6568
6569 ret = stmmac_vlan_update(priv, is_double);
6570
6571 del_vlan_error:
6572 pm_runtime_put(priv->device);
6573
6574 return ret;
6575 }
6576
6577 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6578 {
6579 struct stmmac_priv *priv = netdev_priv(dev);
6580
6581 switch (bpf->command) {
6582 case XDP_SETUP_PROG:
6583 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6584 case XDP_SETUP_XSK_POOL:
6585 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6586 bpf->xsk.queue_id);
6587 default:
6588 return -EOPNOTSUPP;
6589 }
6590 }
6591
6592 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6593 struct xdp_frame **frames, u32 flags)
6594 {
6595 struct stmmac_priv *priv = netdev_priv(dev);
6596 int cpu = smp_processor_id();
6597 struct netdev_queue *nq;
6598 int i, nxmit = 0;
6599 int queue;
6600
6601 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6602 return -ENETDOWN;
6603
6604 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6605 return -EINVAL;
6606
6607 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6608 nq = netdev_get_tx_queue(priv->dev, queue);
6609
6610 __netif_tx_lock(nq, cpu);
6611 /* Avoids TX time-out as we are sharing with slow path */
6612 txq_trans_cond_update(nq);
6613
6614 for (i = 0; i < num_frames; i++) {
6615 int res;
6616
6617 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6618 if (res == STMMAC_XDP_CONSUMED)
6619 break;
6620
6621 nxmit++;
6622 }
6623
6624 if (flags & XDP_XMIT_FLUSH) {
6625 stmmac_flush_tx_descriptors(priv, queue);
6626 stmmac_tx_timer_arm(priv, queue);
6627 }
6628
6629 __netif_tx_unlock(nq);
6630
6631 return nxmit;
6632 }
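
/* Illustrative note on the .ndo_xdp_xmit contract: the return value is
 * the number of frames actually queued for transmission; frames that
 * were not accepted remain owned by the caller (the XDP core), which
 * frees them. Rough caller-side sketch with hypothetical names:
 *
 *	sent = ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
 *	// frames[sent .. n-1] are released by the core, not the driver
 */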
6633
6634 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6635 {
6636 struct stmmac_channel *ch = &priv->channel[queue];
6637 unsigned long flags;
6638
6639 spin_lock_irqsave(&ch->lock, flags);
6640 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6641 spin_unlock_irqrestore(&ch->lock, flags);
6642
6643 stmmac_stop_rx_dma(priv, queue);
6644 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6645 }
6646
6647 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6648 {
6649 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6650 struct stmmac_channel *ch = &priv->channel[queue];
6651 unsigned long flags;
6652 u32 buf_size;
6653 int ret;
6654
6655 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6656 if (ret) {
6657 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6658 return;
6659 }
6660
6661 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6662 if (ret) {
6663 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6664 netdev_err(priv->dev, "Failed to init RX desc.\n");
6665 return;
6666 }
6667
6668 stmmac_reset_rx_queue(priv, queue);
6669 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6670
6671 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6672 rx_q->dma_rx_phy, rx_q->queue_index);
6673
6674 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6675 sizeof(struct dma_desc));
6676 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6677 rx_q->rx_tail_addr, rx_q->queue_index);
6678
6679 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6680 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6681 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6682 buf_size,
6683 rx_q->queue_index);
6684 } else {
6685 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6686 priv->dma_conf.dma_buf_sz,
6687 rx_q->queue_index);
6688 }
6689
6690 stmmac_start_rx_dma(priv, queue);
6691
6692 spin_lock_irqsave(&ch->lock, flags);
6693 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6694 spin_unlock_irqrestore(&ch->lock, flags);
6695 }
6696
6697 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6698 {
6699 struct stmmac_channel *ch = &priv->channel[queue];
6700 unsigned long flags;
6701
6702 spin_lock_irqsave(&ch->lock, flags);
6703 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6704 spin_unlock_irqrestore(&ch->lock, flags);
6705
6706 stmmac_stop_tx_dma(priv, queue);
6707 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6708 }
6709
6710 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6711 {
6712 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6713 struct stmmac_channel *ch = &priv->channel[queue];
6714 unsigned long flags;
6715 int ret;
6716
6717 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6718 if (ret) {
6719 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6720 return;
6721 }
6722
6723 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6724 if (ret) {
6725 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6726 netdev_err(priv->dev, "Failed to init TX desc.\n");
6727 return;
6728 }
6729
6730 stmmac_reset_tx_queue(priv, queue);
6731 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6732
6733 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6734 tx_q->dma_tx_phy, tx_q->queue_index);
6735
6736 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6737 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6738
6739 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6740 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6741 tx_q->tx_tail_addr, tx_q->queue_index);
6742
6743 stmmac_start_tx_dma(priv, queue);
6744
6745 spin_lock_irqsave(&ch->lock, flags);
6746 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6747 spin_unlock_irqrestore(&ch->lock, flags);
6748 }
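
/* Usage note (hedged): the per-queue disable/enable helpers above are
 * meant to be used in pairs around a single-queue reconfiguration,
 * typically when an AF_XDP buffer pool is attached to or detached from
 * one queue (see stmmac_xdp_setup_pool()). Rough sketch of the sequence:
 *
 *	stmmac_disable_rx_queue(priv, queue);
 *	stmmac_disable_tx_queue(priv, queue);
 *	// swap rx_q->xsk_pool / tx_q->xsk_pool here
 *	stmmac_enable_rx_queue(priv, queue);
 *	stmmac_enable_tx_queue(priv, queue);
 */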
6749
6750 void stmmac_xdp_release(struct net_device *dev)
6751 {
6752 struct stmmac_priv *priv = netdev_priv(dev);
6753 u32 chan;
6754
6755 /* Ensure tx function is not running */
6756 netif_tx_disable(dev);
6757
6758 /* Disable NAPI process */
6759 stmmac_disable_all_queues(priv);
6760
6761 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6762 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6763
6764 /* Free the IRQ lines */
6765 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6766
6767 /* Stop TX/RX DMA channels */
6768 stmmac_stop_all_dma(priv);
6769
6770 /* Release and free the Rx/Tx resources */
6771 free_dma_desc_resources(priv, &priv->dma_conf);
6772
6773 /* Disable the MAC Rx/Tx */
6774 stmmac_mac_set(priv, priv->ioaddr, false);
6775
6776 /* set trans_start so we don't get spurious
6777 * watchdogs during reset
6778 */
6779 netif_trans_update(dev);
6780 netif_carrier_off(dev);
6781 }
6782
6783 int stmmac_xdp_open(struct net_device *dev)
6784 {
6785 struct stmmac_priv *priv = netdev_priv(dev);
6786 u32 rx_cnt = priv->plat->rx_queues_to_use;
6787 u32 tx_cnt = priv->plat->tx_queues_to_use;
6788 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6789 struct stmmac_rx_queue *rx_q;
6790 struct stmmac_tx_queue *tx_q;
6791 u32 buf_size;
6792 bool sph_en;
6793 u32 chan;
6794 int ret;
6795
6796 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6797 if (ret < 0) {
6798 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6799 __func__);
6800 goto dma_desc_error;
6801 }
6802
6803 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6804 if (ret < 0) {
6805 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6806 __func__);
6807 goto init_error;
6808 }
6809
6810 stmmac_reset_queues_param(priv);
6811
6812 /* DMA CSR Channel configuration */
6813 for (chan = 0; chan < dma_csr_ch; chan++) {
6814 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6815 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6816 }
6817
6818 /* Adjust Split header */
6819 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6820
6821 /* DMA RX Channel Configuration */
6822 for (chan = 0; chan < rx_cnt; chan++) {
6823 rx_q = &priv->dma_conf.rx_queue[chan];
6824
6825 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6826 rx_q->dma_rx_phy, chan);
6827
6828 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6829 (rx_q->buf_alloc_num *
6830 sizeof(struct dma_desc));
6831 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6832 rx_q->rx_tail_addr, chan);
6833
6834 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6835 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6836 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6837 buf_size,
6838 rx_q->queue_index);
6839 } else {
6840 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6841 priv->dma_conf.dma_buf_sz,
6842 rx_q->queue_index);
6843 }
6844
6845 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6846 }
6847
6848 /* DMA TX Channel Configuration */
6849 for (chan = 0; chan < tx_cnt; chan++) {
6850 tx_q = &priv->dma_conf.tx_queue[chan];
6851
6852 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6853 tx_q->dma_tx_phy, chan);
6854
6855 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6856 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6857 tx_q->tx_tail_addr, chan);
6858
6859 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6860 tx_q->txtimer.function = stmmac_tx_timer;
6861 }
6862
6863 /* Enable the MAC Rx/Tx */
6864 stmmac_mac_set(priv, priv->ioaddr, true);
6865
6866 /* Start Rx & Tx DMA Channels */
6867 stmmac_start_all_dma(priv);
6868
6869 ret = stmmac_request_irq(dev);
6870 if (ret)
6871 goto irq_error;
6872
6873 /* Enable NAPI process */
6874 stmmac_enable_all_queues(priv);
6875 netif_carrier_on(dev);
6876 netif_tx_start_all_queues(dev);
6877 stmmac_enable_all_dma_irq(priv);
6878
6879 return 0;
6880
6881 irq_error:
6882 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6883 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6884
6885 stmmac_hw_teardown(dev);
6886 init_error:
6887 free_dma_desc_resources(priv, &priv->dma_conf);
6888 dma_desc_error:
6889 return ret;
6890 }
6891
6892 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6893 {
6894 struct stmmac_priv *priv = netdev_priv(dev);
6895 struct stmmac_rx_queue *rx_q;
6896 struct stmmac_tx_queue *tx_q;
6897 struct stmmac_channel *ch;
6898
6899 if (test_bit(STMMAC_DOWN, &priv->state) ||
6900 !netif_carrier_ok(priv->dev))
6901 return -ENETDOWN;
6902
6903 if (!stmmac_xdp_is_enabled(priv))
6904 return -EINVAL;
6905
6906 if (queue >= priv->plat->rx_queues_to_use ||
6907 queue >= priv->plat->tx_queues_to_use)
6908 return -EINVAL;
6909
6910 rx_q = &priv->dma_conf.rx_queue[queue];
6911 tx_q = &priv->dma_conf.tx_queue[queue];
6912 ch = &priv->channel[queue];
6913
6914 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6915 return -EINVAL;
6916
6917 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6918 /* EQoS does not have per-DMA channel SW interrupt,
6919 * so we schedule RX Napi straight-away.
6920 */
6921 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6922 __napi_schedule(&ch->rxtx_napi);
6923 }
6924
6925 return 0;
6926 }
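
/* Usage sketch (userspace side, hedged): when the need_wakeup flag is set
 * on an AF_XDP ring, user space kicks the driver so the rxtx NAPI above
 * gets scheduled, commonly with a dummy sendto() on the XSK socket.
 * xsk_ring_prod__needs_wakeup() is from libxdp/libbpf's xsk.h and xsk_fd
 * is a placeholder:
 *
 *	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */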
6927
6928 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6929 {
6930 struct stmmac_priv *priv = netdev_priv(dev);
6931 u32 tx_cnt = priv->plat->tx_queues_to_use;
6932 u32 rx_cnt = priv->plat->rx_queues_to_use;
6933 unsigned int start;
6934 int q;
6935
6936 for (q = 0; q < tx_cnt; q++) {
6937 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6938 u64 tx_packets;
6939 u64 tx_bytes;
6940
6941 do {
6942 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6943 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
6944 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6945 do {
6946 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6947 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6948 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6949
6950 stats->tx_packets += tx_packets;
6951 stats->tx_bytes += tx_bytes;
6952 }
6953
6954 for (q = 0; q < rx_cnt; q++) {
6955 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6956 u64 rx_packets;
6957 u64 rx_bytes;
6958
6959 do {
6960 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6961 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6962 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
6963 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6964
6965 stats->rx_packets += rx_packets;
6966 stats->rx_bytes += rx_bytes;
6967 }
6968
6969 stats->rx_dropped = priv->xstats.rx_dropped;
6970 stats->rx_errors = priv->xstats.rx_errors;
6971 stats->tx_dropped = priv->xstats.tx_dropped;
6972 stats->tx_errors = priv->xstats.tx_errors;
6973 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6974 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6975 stats->rx_length_errors = priv->xstats.rx_length;
6976 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6977 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6978 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6979 }
6980
6981 static const struct net_device_ops stmmac_netdev_ops = {
6982 .ndo_open = stmmac_open,
6983 .ndo_start_xmit = stmmac_xmit,
6984 .ndo_stop = stmmac_release,
6985 .ndo_change_mtu = stmmac_change_mtu,
6986 .ndo_fix_features = stmmac_fix_features,
6987 .ndo_set_features = stmmac_set_features,
6988 .ndo_set_rx_mode = stmmac_set_rx_mode,
6989 .ndo_tx_timeout = stmmac_tx_timeout,
6990 .ndo_eth_ioctl = stmmac_ioctl,
6991 .ndo_get_stats64 = stmmac_get_stats64,
6992 .ndo_setup_tc = stmmac_setup_tc,
6993 .ndo_select_queue = stmmac_select_queue,
6994 .ndo_set_mac_address = stmmac_set_mac_address,
6995 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6996 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6997 .ndo_bpf = stmmac_bpf,
6998 .ndo_xdp_xmit = stmmac_xdp_xmit,
6999 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7000 };
7001
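/**
 * stmmac_reset_subtask - reset the device if requested
 * @priv: driver private structure
 * Description: when STMMAC_RESET_REQUESTED is set and the interface is not
 * down, close and re-open the device under the rtnl lock to recover the
 * adapter.
 */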
7002 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7003 {
7004 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7005 return;
7006 if (test_bit(STMMAC_DOWN, &priv->state))
7007 return;
7008
7009 netdev_err(priv->dev, "Reset adapter.\n");
7010
7011 rtnl_lock();
7012 netif_trans_update(priv->dev);
7013 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7014 usleep_range(1000, 2000);
7015
7016 set_bit(STMMAC_DOWN, &priv->state);
7017 dev_close(priv->dev);
7018 dev_open(priv->dev, NULL);
7019 clear_bit(STMMAC_DOWN, &priv->state);
7020 clear_bit(STMMAC_RESETING, &priv->state);
7021 rtnl_unlock();
7022 }
7023
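/**
 * stmmac_service_task - deferred service work
 * @work: work_struct pointer
 * Description: runs the reset subtask and clears the service-scheduled bit.
 */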
7024 static void stmmac_service_task(struct work_struct *work)
7025 {
7026 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7027 service_task);
7028
7029 stmmac_reset_subtask(priv);
7030 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7031 }
7032
7033 /**
7034 * stmmac_hw_init - Init the MAC device
7035 * @priv: driver private structure
7036 * Description: this function is to configure the MAC device according to
7037 * some platform parameters or the HW capability register. It prepares the
7038  * driver to use either ring or chain modes and to set up either enhanced or
7039 * normal descriptors.
7040 */
7041 static int stmmac_hw_init(struct stmmac_priv *priv)
7042 {
7043 int ret;
7044
7045 	/* dwmac-sun8i only works in chain mode */
7046 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7047 chain_mode = 1;
7048 priv->chain_mode = chain_mode;
7049
7050 /* Initialize HW Interface */
7051 ret = stmmac_hwif_init(priv);
7052 if (ret)
7053 return ret;
7054
7055 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7056 priv->hw_cap_support = stmmac_get_hw_features(priv);
7057 if (priv->hw_cap_support) {
7058 dev_info(priv->device, "DMA HW capability register supported\n");
7059
7060 		/* We can override some gmac/dma configuration fields (e.g.
7061 		 * enh_desc, tx_coe) that are passed through the
7062 		 * platform with the values from the HW capability
7063 		 * register (if supported).
7064 */
7065 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7066 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7067 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7068 priv->hw->pmt = priv->plat->pmt;
7069 if (priv->dma_cap.hash_tb_sz) {
7070 priv->hw->multicast_filter_bins =
7071 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7072 priv->hw->mcast_bits_log2 =
7073 ilog2(priv->hw->multicast_filter_bins);
7074 }
7075
7076 /* TXCOE doesn't work in thresh DMA mode */
7077 if (priv->plat->force_thresh_dma_mode)
7078 priv->plat->tx_coe = 0;
7079 else
7080 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7081
7082 /* In case of GMAC4 rx_coe is from HW cap register. */
7083 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7084
7085 if (priv->dma_cap.rx_coe_type2)
7086 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7087 else if (priv->dma_cap.rx_coe_type1)
7088 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7089
7090 } else {
7091 dev_info(priv->device, "No HW DMA feature register supported\n");
7092 }
7093
7094 if (priv->plat->rx_coe) {
7095 priv->hw->rx_csum = priv->plat->rx_coe;
7096 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7097 if (priv->synopsys_id < DWMAC_CORE_4_00)
7098 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7099 }
7100 if (priv->plat->tx_coe)
7101 dev_info(priv->device, "TX Checksum insertion supported\n");
7102
7103 if (priv->plat->pmt) {
7104 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7105 device_set_wakeup_capable(priv->device, 1);
7106 }
7107
7108 if (priv->dma_cap.tsoen)
7109 dev_info(priv->device, "TSO supported\n");
7110
7111 if (priv->dma_cap.number_rx_queues &&
7112 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7113 dev_warn(priv->device,
7114 "Number of Rx queues (%u) exceeds dma capability\n",
7115 priv->plat->rx_queues_to_use);
7116 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7117 }
7118 if (priv->dma_cap.number_tx_queues &&
7119 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7120 dev_warn(priv->device,
7121 "Number of Tx queues (%u) exceeds dma capability\n",
7122 priv->plat->tx_queues_to_use);
7123 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7124 }
7125
7126 if (priv->dma_cap.rx_fifo_size &&
7127 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7128 dev_warn(priv->device,
7129 "Rx FIFO size (%u) exceeds dma capability\n",
7130 priv->plat->rx_fifo_size);
7131 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7132 }
7133 if (priv->dma_cap.tx_fifo_size &&
7134 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7135 dev_warn(priv->device,
7136 "Tx FIFO size (%u) exceeds dma capability\n",
7137 priv->plat->tx_fifo_size);
7138 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7139 }
7140
7141 priv->hw->vlan_fail_q_en =
7142 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7143 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7144
7145 /* Run HW quirks, if any */
7146 if (priv->hwif_quirks) {
7147 ret = priv->hwif_quirks(priv);
7148 if (ret)
7149 return ret;
7150 }
7151
7152 	/* Rx Watchdog is available in cores newer than 3.40.
7153 	 * In some cases, for example on buggy HW, this feature
7154 	 * has to be disabled; this can be done by passing the
7155 	 * riwt_off field from the platform.
7156 */
7157 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7158 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7159 priv->use_riwt = 1;
7160 dev_info(priv->device,
7161 "Enable RX Mitigation via HW Watchdog Timer\n");
7162 }
7163
7164 return 0;
7165 }
7166
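/**
 * stmmac_napi_add - register the NAPI instances
 * @dev: network device pointer
 * Description: adds the RX, TX and combined RX/TX NAPI handlers for every
 * channel in use.
 */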
7167 static void stmmac_napi_add(struct net_device *dev)
7168 {
7169 struct stmmac_priv *priv = netdev_priv(dev);
7170 u32 queue, maxq;
7171
7172 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7173
7174 for (queue = 0; queue < maxq; queue++) {
7175 struct stmmac_channel *ch = &priv->channel[queue];
7176
7177 ch->priv_data = priv;
7178 ch->index = queue;
7179 spin_lock_init(&ch->lock);
7180
7181 if (queue < priv->plat->rx_queues_to_use) {
7182 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7183 }
7184 if (queue < priv->plat->tx_queues_to_use) {
7185 netif_napi_add_tx(dev, &ch->tx_napi,
7186 stmmac_napi_poll_tx);
7187 }
7188 if (queue < priv->plat->rx_queues_to_use &&
7189 queue < priv->plat->tx_queues_to_use) {
7190 netif_napi_add(dev, &ch->rxtx_napi,
7191 stmmac_napi_poll_rxtx);
7192 }
7193 }
7194 }
7195
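/**
 * stmmac_napi_del - unregister the NAPI instances
 * @dev: network device pointer
 * Description: deletes the NAPI handlers registered by stmmac_napi_add().
 */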
7196 static void stmmac_napi_del(struct net_device *dev)
7197 {
7198 struct stmmac_priv *priv = netdev_priv(dev);
7199 u32 queue, maxq;
7200
7201 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7202
7203 for (queue = 0; queue < maxq; queue++) {
7204 struct stmmac_channel *ch = &priv->channel[queue];
7205
7206 if (queue < priv->plat->rx_queues_to_use)
7207 netif_napi_del(&ch->rx_napi);
7208 if (queue < priv->plat->tx_queues_to_use)
7209 netif_napi_del(&ch->tx_napi);
7210 if (queue < priv->plat->rx_queues_to_use &&
7211 queue < priv->plat->tx_queues_to_use) {
7212 netif_napi_del(&ch->rxtx_napi);
7213 }
7214 }
7215 }
7216
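/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: closes the interface (if running), rebuilds the NAPI
 * instances and the default RSS table for the new queue counts, refreshes
 * the MAC/phylink capabilities and re-opens the interface.
 * Return: 0 on success, otherwise errno.
 */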
7217 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7218 {
7219 struct stmmac_priv *priv = netdev_priv(dev);
7220 int ret = 0, i;
7221 int max_speed;
7222
7223 if (netif_running(dev))
7224 stmmac_release(dev);
7225
7226 stmmac_napi_del(dev);
7227
7228 priv->plat->rx_queues_to_use = rx_cnt;
7229 priv->plat->tx_queues_to_use = tx_cnt;
7230 if (!netif_is_rxfh_configured(dev))
7231 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7232 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7233 rx_cnt);
7234
7235 stmmac_mac_phylink_get_caps(priv);
7236
7237 priv->phylink_config.mac_capabilities = priv->hw->link.caps;
7238
7239 max_speed = priv->plat->max_speed;
7240 if (max_speed)
7241 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7242
7243 stmmac_napi_add(dev);
7244
7245 if (netif_running(dev))
7246 ret = stmmac_open(dev);
7247
7248 return ret;
7249 }
7250
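/**
 * stmmac_reinit_ringparam - change the DMA ring sizes
 * @dev: network device pointer
 * @rx_size: new RX descriptor ring size
 * @tx_size: new TX descriptor ring size
 * Description: closes the interface (if running), updates the DMA
 * configuration and re-opens the interface.
 * Return: 0 on success, otherwise errno.
 */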
7251 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7252 {
7253 struct stmmac_priv *priv = netdev_priv(dev);
7254 int ret = 0;
7255
7256 if (netif_running(dev))
7257 stmmac_release(dev);
7258
7259 priv->dma_conf.dma_rx_size = rx_size;
7260 priv->dma_conf.dma_tx_size = tx_size;
7261
7262 if (netif_running(dev))
7263 ret = stmmac_open(dev);
7264
7265 return ret;
7266 }
7267
7268 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
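/**
 * stmmac_fpe_lp_task - FPE link-partner handshake work
 * @work: work_struct pointer
 * Description: polls the local and link-partner FPE states, sending verify
 * mPackets until both stations are ON, the handshake is switched off or the
 * retry budget is exhausted.
 */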
7269 static void stmmac_fpe_lp_task(struct work_struct *work)
7270 {
7271 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7272 fpe_task);
7273 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7274 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7275 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7276 bool *hs_enable = &fpe_cfg->hs_enable;
7277 bool *enable = &fpe_cfg->enable;
7278 int retries = 20;
7279
7280 while (retries-- > 0) {
7281 /* Bail out immediately if FPE handshake is OFF */
7282 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7283 break;
7284
7285 if (*lo_state == FPE_STATE_ENTERING_ON &&
7286 *lp_state == FPE_STATE_ENTERING_ON) {
7287 stmmac_fpe_configure(priv, priv->ioaddr,
7288 fpe_cfg,
7289 priv->plat->tx_queues_to_use,
7290 priv->plat->rx_queues_to_use,
7291 *enable);
7292
7293 netdev_info(priv->dev, "configured FPE\n");
7294
7295 *lo_state = FPE_STATE_ON;
7296 *lp_state = FPE_STATE_ON;
7297 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7298 break;
7299 }
7300
7301 if ((*lo_state == FPE_STATE_CAPABLE ||
7302 *lo_state == FPE_STATE_ENTERING_ON) &&
7303 *lp_state != FPE_STATE_ON) {
7304 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7305 *lo_state, *lp_state);
7306 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7307 fpe_cfg,
7308 MPACKET_VERIFY);
7309 }
7310 /* Sleep then retry */
7311 msleep(500);
7312 }
7313
7314 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7315 }
7316
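/**
 * stmmac_fpe_handshake - start/stop the FPE handshake with the link partner
 * @priv: driver private structure
 * @enable: true to start the handshake (send a verify mPacket), false to
 * reset the handshake state machine to OFF
 */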
7317 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7318 {
7319 if (priv->plat->fpe_cfg->hs_enable != enable) {
7320 if (enable) {
7321 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7322 priv->plat->fpe_cfg,
7323 MPACKET_VERIFY);
7324 } else {
7325 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7326 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7327 }
7328
7329 priv->plat->fpe_cfg->hs_enable = enable;
7330 }
7331 }
7332
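/**
 * stmmac_xdp_rx_timestamp - XDP RX hardware timestamp hint
 * @_ctx: XDP metadata context
 * @timestamp: location where the RX timestamp is returned
 * Description: reads the RX hardware timestamp from the descriptor (the
 * context descriptor that follows it on GMAC4/XGMAC), compensates for the
 * CDC error and returns it as a ktime value.
 * Return: 0 on success, -ENODATA if timestamping is disabled or no
 * timestamp is available.
 */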
7333 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7334 {
7335 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7336 struct dma_desc *desc_contains_ts = ctx->desc;
7337 struct stmmac_priv *priv = ctx->priv;
7338 struct dma_desc *ndesc = ctx->ndesc;
7339 struct dma_desc *desc = ctx->desc;
7340 u64 ns = 0;
7341
7342 if (!priv->hwts_rx_en)
7343 return -ENODATA;
7344
7345 /* For GMAC4, the valid timestamp is from CTX next desc. */
7346 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7347 desc_contains_ts = ndesc;
7348
7349 /* Check if timestamp is available */
7350 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7351 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7352 ns -= priv->plat->cdc_error_adj;
7353 *timestamp = ns_to_ktime(ns);
7354 return 0;
7355 }
7356
7357 return -ENODATA;
7358 }
7359
7360 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7361 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7362 };
7363
7364 /**
7365 * stmmac_dvr_probe
7366 * @device: device pointer
7367 * @plat_dat: platform data pointer
7368 * @res: stmmac resource pointer
7369  * Description: this is the main probe function, used to call
7370  * alloc_etherdev and allocate the private data structure.
7371  * Return:
7372  * 0 on success, otherwise a negative errno.
7373 */
7374 int stmmac_dvr_probe(struct device *device,
7375 struct plat_stmmacenet_data *plat_dat,
7376 struct stmmac_resources *res)
7377 {
7378 struct net_device *ndev = NULL;
7379 struct stmmac_priv *priv;
7380 u32 rxq;
7381 int i, ret = 0;
7382
7383 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7384 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7385 if (!ndev)
7386 return -ENOMEM;
7387
7388 SET_NETDEV_DEV(ndev, device);
7389
7390 priv = netdev_priv(ndev);
7391 priv->device = device;
7392 priv->dev = ndev;
7393
7394 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7395 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7396 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7397 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7398 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7399 }
7400
7401 priv->xstats.pcpu_stats =
7402 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7403 if (!priv->xstats.pcpu_stats)
7404 return -ENOMEM;
7405
7406 stmmac_set_ethtool_ops(ndev);
7407 priv->pause = pause;
7408 priv->plat = plat_dat;
7409 priv->ioaddr = res->addr;
7410 priv->dev->base_addr = (unsigned long)res->addr;
7411 priv->plat->dma_cfg->multi_msi_en =
7412 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7413
7414 priv->dev->irq = res->irq;
7415 priv->wol_irq = res->wol_irq;
7416 priv->lpi_irq = res->lpi_irq;
7417 priv->sfty_ce_irq = res->sfty_ce_irq;
7418 priv->sfty_ue_irq = res->sfty_ue_irq;
7419 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7420 priv->rx_irq[i] = res->rx_irq[i];
7421 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7422 priv->tx_irq[i] = res->tx_irq[i];
7423
7424 if (!is_zero_ether_addr(res->mac))
7425 eth_hw_addr_set(priv->dev, res->mac);
7426
7427 dev_set_drvdata(device, priv->dev);
7428
7429 /* Verify driver arguments */
7430 stmmac_verify_args();
7431
7432 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7433 if (!priv->af_xdp_zc_qps)
7434 return -ENOMEM;
7435
7436 /* Allocate workqueue */
7437 priv->wq = create_singlethread_workqueue("stmmac_wq");
7438 if (!priv->wq) {
7439 dev_err(priv->device, "failed to create workqueue\n");
7440 ret = -ENOMEM;
7441 goto error_wq_init;
7442 }
7443
7444 INIT_WORK(&priv->service_task, stmmac_service_task);
7445
7446 /* Initialize Link Partner FPE workqueue */
7447 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7448
7449 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7450 	 * this needs to have multiple instances.
7451 */
7452 if ((phyaddr >= 0) && (phyaddr <= 31))
7453 priv->plat->phy_addr = phyaddr;
7454
7455 if (priv->plat->stmmac_rst) {
7456 ret = reset_control_assert(priv->plat->stmmac_rst);
7457 reset_control_deassert(priv->plat->stmmac_rst);
7458 		/* Some reset controllers have only a reset callback instead of an
7459 		 * assert + deassert callback pair.
7460 */
7461 if (ret == -ENOTSUPP)
7462 reset_control_reset(priv->plat->stmmac_rst);
7463 }
7464
7465 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7466 if (ret == -ENOTSUPP)
7467 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7468 ERR_PTR(ret));
7469
7470 /* Wait a bit for the reset to take effect */
7471 udelay(10);
7472
7473 /* Init MAC and get the capabilities */
7474 ret = stmmac_hw_init(priv);
7475 if (ret)
7476 goto error_hw_init;
7477
7478 	/* Only DWMAC core versions 5.20 and onwards support HW descriptor prefetch.
7479 */
7480 if (priv->synopsys_id < DWMAC_CORE_5_20)
7481 priv->plat->dma_cfg->dche = false;
7482
7483 stmmac_check_ether_addr(priv);
7484
7485 ndev->netdev_ops = &stmmac_netdev_ops;
7486
7487 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7488
7489 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7490 NETIF_F_RXCSUM;
7491 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7492 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7493
7494 ret = stmmac_tc_init(priv, priv);
7495 if (!ret) {
7496 ndev->hw_features |= NETIF_F_HW_TC;
7497 }
7498
7499 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7500 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7501 if (priv->plat->has_gmac4)
7502 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7503 priv->tso = true;
7504 dev_info(priv->device, "TSO feature enabled\n");
7505 }
7506
7507 if (priv->dma_cap.sphen &&
7508 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7509 ndev->hw_features |= NETIF_F_GRO;
7510 priv->sph_cap = true;
7511 priv->sph = priv->sph_cap;
7512 dev_info(priv->device, "SPH feature enabled\n");
7513 }
7514
7515 /* Ideally our host DMA address width is the same as for the
7516 * device. However, it may differ and then we have to use our
7517 * host DMA width for allocation and the device DMA width for
7518 * register handling.
7519 */
7520 if (priv->plat->host_dma_width)
7521 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7522 else
7523 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7524
7525 if (priv->dma_cap.host_dma_width) {
7526 ret = dma_set_mask_and_coherent(device,
7527 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7528 if (!ret) {
7529 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7530 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7531
7532 /*
7533 * If more than 32 bits can be addressed, make sure to
7534 * enable enhanced addressing mode.
7535 */
7536 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7537 priv->plat->dma_cfg->eame = true;
7538 } else {
7539 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7540 if (ret) {
7541 dev_err(priv->device, "Failed to set DMA Mask\n");
7542 goto error_hw_init;
7543 }
7544
7545 priv->dma_cap.host_dma_width = 32;
7546 }
7547 }
7548
7549 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7550 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7551 #ifdef STMMAC_VLAN_TAG_USED
7552 /* Both mac100 and gmac support receive VLAN tag detection */
7553 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7554 if (priv->dma_cap.vlhash) {
7555 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7556 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7557 }
7558 if (priv->dma_cap.vlins) {
7559 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7560 if (priv->dma_cap.dvlan)
7561 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7562 }
7563 #endif
7564 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7565
7566 priv->xstats.threshold = tc;
7567
7568 /* Initialize RSS */
7569 rxq = priv->plat->rx_queues_to_use;
7570 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7571 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7572 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7573
7574 if (priv->dma_cap.rssen && priv->plat->rss_en)
7575 ndev->features |= NETIF_F_RXHASH;
7576
7577 ndev->vlan_features |= ndev->features;
7578 /* TSO doesn't work on VLANs yet */
7579 ndev->vlan_features &= ~NETIF_F_TSO;
7580
7581 /* MTU range: 46 - hw-specific max */
7582 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7583 if (priv->plat->has_xgmac)
7584 ndev->max_mtu = XGMAC_JUMBO_LEN;
7585 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7586 ndev->max_mtu = JUMBO_LEN;
7587 else
7588 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7589 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7590 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7591 */
7592 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7593 (priv->plat->maxmtu >= ndev->min_mtu))
7594 ndev->max_mtu = priv->plat->maxmtu;
7595 else if (priv->plat->maxmtu < ndev->min_mtu)
7596 dev_warn(priv->device,
7597 "%s: warning: maxmtu having invalid value (%d)\n",
7598 __func__, priv->plat->maxmtu);
7599
7600 if (flow_ctrl)
7601 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7602
7603 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7604
7605 /* Setup channels NAPI */
7606 stmmac_napi_add(ndev);
7607
7608 mutex_init(&priv->lock);
7609
7610 	/* If a specific clk_csr value is passed from the platform,
7611 	 * this means that the CSR Clock Range selection cannot be
7612 	 * changed at run-time and is fixed. Otherwise, the driver will try
7613 	 * to set the MDC clock dynamically according to the actual csr
7614 	 * clock input.
7615 */
7616 if (priv->plat->clk_csr >= 0)
7617 priv->clk_csr = priv->plat->clk_csr;
7618 else
7619 stmmac_clk_csr_set(priv);
7620
7621 stmmac_check_pcs_mode(priv);
7622
7623 pm_runtime_get_noresume(device);
7624 pm_runtime_set_active(device);
7625 if (!pm_runtime_enabled(device))
7626 pm_runtime_enable(device);
7627
7628 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7629 priv->hw->pcs != STMMAC_PCS_RTBI) {
7630 /* MDIO bus Registration */
7631 ret = stmmac_mdio_register(ndev);
7632 if (ret < 0) {
7633 dev_err_probe(priv->device, ret,
7634 "%s: MDIO bus (id: %d) registration failed\n",
7635 __func__, priv->plat->bus_id);
7636 goto error_mdio_register;
7637 }
7638 }
7639
7640 if (priv->plat->speed_mode_2500)
7641 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7642
7643 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7644 ret = stmmac_xpcs_setup(priv->mii);
7645 if (ret)
7646 goto error_xpcs_setup;
7647 }
7648
7649 ret = stmmac_phy_setup(priv);
7650 if (ret) {
7651 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7652 goto error_phy_setup;
7653 }
7654
7655 ret = register_netdev(ndev);
7656 if (ret) {
7657 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7658 __func__, ret);
7659 goto error_netdev_register;
7660 }
7661
7662 #ifdef CONFIG_DEBUG_FS
7663 stmmac_init_fs(ndev);
7664 #endif
7665
7666 if (priv->plat->dump_debug_regs)
7667 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7668
7669 /* Let pm_runtime_put() disable the clocks.
7670 * If CONFIG_PM is not enabled, the clocks will stay powered.
7671 */
7672 pm_runtime_put(device);
7673
7674 return ret;
7675
7676 error_netdev_register:
7677 phylink_destroy(priv->phylink);
7678 error_xpcs_setup:
7679 error_phy_setup:
7680 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7681 priv->hw->pcs != STMMAC_PCS_RTBI)
7682 stmmac_mdio_unregister(ndev);
7683 error_mdio_register:
7684 stmmac_napi_del(ndev);
7685 error_hw_init:
7686 destroy_workqueue(priv->wq);
7687 error_wq_init:
7688 bitmap_free(priv->af_xdp_zc_qps);
7689
7690 return ret;
7691 }
7692 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7693
7694 /**
7695 * stmmac_dvr_remove
7696 * @dev: device pointer
7697  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7698  * changes the link status, and releases the DMA descriptor rings.
7699 */
7700 void stmmac_dvr_remove(struct device *dev)
7701 {
7702 struct net_device *ndev = dev_get_drvdata(dev);
7703 struct stmmac_priv *priv = netdev_priv(ndev);
7704
7705 netdev_info(priv->dev, "%s: removing driver", __func__);
7706
7707 pm_runtime_get_sync(dev);
7708
7709 stmmac_stop_all_dma(priv);
7710 stmmac_mac_set(priv, priv->ioaddr, false);
7711 netif_carrier_off(ndev);
7712 unregister_netdev(ndev);
7713
7714 #ifdef CONFIG_DEBUG_FS
7715 stmmac_exit_fs(ndev);
7716 #endif
7717 phylink_destroy(priv->phylink);
7718 if (priv->plat->stmmac_rst)
7719 reset_control_assert(priv->plat->stmmac_rst);
7720 reset_control_assert(priv->plat->stmmac_ahb_rst);
7721 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7722 priv->hw->pcs != STMMAC_PCS_RTBI)
7723 stmmac_mdio_unregister(ndev);
7724 destroy_workqueue(priv->wq);
7725 mutex_destroy(&priv->lock);
7726 bitmap_free(priv->af_xdp_zc_qps);
7727
7728 pm_runtime_disable(dev);
7729 pm_runtime_put_noidle(dev);
7730 }
7731 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7732
7733 /**
7734 * stmmac_suspend - suspend callback
7735 * @dev: device pointer
7736  * Description: this function suspends the device; it is called by the
7737  * platform driver to stop the network queue, program the PMT register
7738  * (for WoL), and clean up and release the driver resources.
7739 */
7740 int stmmac_suspend(struct device *dev)
7741 {
7742 struct net_device *ndev = dev_get_drvdata(dev);
7743 struct stmmac_priv *priv = netdev_priv(ndev);
7744 u32 chan;
7745
7746 if (!ndev || !netif_running(ndev))
7747 return 0;
7748
7749 mutex_lock(&priv->lock);
7750
7751 netif_device_detach(ndev);
7752
7753 stmmac_disable_all_queues(priv);
7754
7755 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7756 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7757
7758 if (priv->eee_enabled) {
7759 priv->tx_path_in_lpi_mode = false;
7760 del_timer_sync(&priv->eee_ctrl_timer);
7761 }
7762
7763 /* Stop TX/RX DMA */
7764 stmmac_stop_all_dma(priv);
7765
7766 if (priv->plat->serdes_powerdown)
7767 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7768
7769 /* Enable Power down mode by programming the PMT regs */
7770 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7771 stmmac_pmt(priv, priv->hw, priv->wolopts);
7772 priv->irq_wake = 1;
7773 } else {
7774 stmmac_mac_set(priv, priv->ioaddr, false);
7775 pinctrl_pm_select_sleep_state(priv->device);
7776 }
7777
7778 mutex_unlock(&priv->lock);
7779
7780 rtnl_lock();
7781 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7782 phylink_suspend(priv->phylink, true);
7783 } else {
7784 if (device_may_wakeup(priv->device))
7785 phylink_speed_down(priv->phylink, false);
7786 phylink_suspend(priv->phylink, false);
7787 }
7788 rtnl_unlock();
7789
7790 if (priv->dma_cap.fpesel) {
7791 /* Disable FPE */
7792 stmmac_fpe_configure(priv, priv->ioaddr,
7793 priv->plat->fpe_cfg,
7794 priv->plat->tx_queues_to_use,
7795 priv->plat->rx_queues_to_use, false);
7796
7797 stmmac_fpe_handshake(priv, false);
7798 stmmac_fpe_stop_wq(priv);
7799 }
7800
7801 priv->speed = SPEED_UNKNOWN;
7802 return 0;
7803 }
7804 EXPORT_SYMBOL_GPL(stmmac_suspend);
7805
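/**
 * stmmac_reset_rx_queue - reset the SW state of an RX queue
 * @priv: device pointer
 * @queue: RX queue index
 */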
7806 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7807 {
7808 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7809
7810 rx_q->cur_rx = 0;
7811 rx_q->dirty_rx = 0;
7812 }
7813
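/**
 * stmmac_reset_tx_queue - reset the SW state of a TX queue
 * @priv: device pointer
 * @queue: TX queue index
 * Description: clears the ring indexes and the stored MSS value, and resets
 * the BQL state of the corresponding netdev TX queue.
 */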
7814 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7815 {
7816 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7817
7818 tx_q->cur_tx = 0;
7819 tx_q->dirty_tx = 0;
7820 tx_q->mss = 0;
7821
7822 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7823 }
7824
7825 /**
7826 * stmmac_reset_queues_param - reset queue parameters
7827 * @priv: device pointer
7828 */
7829 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7830 {
7831 u32 rx_cnt = priv->plat->rx_queues_to_use;
7832 u32 tx_cnt = priv->plat->tx_queues_to_use;
7833 u32 queue;
7834
7835 for (queue = 0; queue < rx_cnt; queue++)
7836 stmmac_reset_rx_queue(priv, queue);
7837
7838 for (queue = 0; queue < tx_cnt; queue++)
7839 stmmac_reset_tx_queue(priv, queue);
7840 }
7841
7842 /**
7843 * stmmac_resume - resume callback
7844 * @dev: device pointer
7845  * Description: on resume this function is invoked to set up the DMA and CORE
7846 * in a usable state.
7847 */
7848 int stmmac_resume(struct device *dev)
7849 {
7850 struct net_device *ndev = dev_get_drvdata(dev);
7851 struct stmmac_priv *priv = netdev_priv(ndev);
7852 int ret;
7853
7854 if (!netif_running(ndev))
7855 return 0;
7856
7857 	/* The Power Down bit in the PM register is cleared
7858 	 * automatically as soon as a magic packet or a Wake-up frame
7859 	 * is received. Anyway, it's better to manually clear
7860 	 * this bit because it can generate problems while resuming
7861 	 * from other devices (e.g. a serial console).
7862 */
7863 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7864 mutex_lock(&priv->lock);
7865 stmmac_pmt(priv, priv->hw, 0);
7866 mutex_unlock(&priv->lock);
7867 priv->irq_wake = 0;
7868 } else {
7869 pinctrl_pm_select_default_state(priv->device);
7870 /* reset the phy so that it's ready */
7871 if (priv->mii)
7872 stmmac_mdio_reset(priv->mii);
7873 }
7874
7875 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7876 priv->plat->serdes_powerup) {
7877 ret = priv->plat->serdes_powerup(ndev,
7878 priv->plat->bsp_priv);
7879
7880 if (ret < 0)
7881 return ret;
7882 }
7883
7884 rtnl_lock();
7885 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7886 phylink_resume(priv->phylink);
7887 } else {
7888 phylink_resume(priv->phylink);
7889 if (device_may_wakeup(priv->device))
7890 phylink_speed_up(priv->phylink);
7891 }
7892 rtnl_unlock();
7893
7894 rtnl_lock();
7895 mutex_lock(&priv->lock);
7896
7897 stmmac_reset_queues_param(priv);
7898
7899 stmmac_free_tx_skbufs(priv);
7900 stmmac_clear_descriptors(priv, &priv->dma_conf);
7901
7902 stmmac_hw_setup(ndev, false);
7903 stmmac_init_coalesce(priv);
7904 stmmac_set_rx_mode(ndev);
7905
7906 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7907
7908 stmmac_enable_all_queues(priv);
7909 stmmac_enable_all_dma_irq(priv);
7910
7911 mutex_unlock(&priv->lock);
7912 rtnl_unlock();
7913
7914 netif_device_attach(ndev);
7915
7916 return 0;
7917 }
7918 EXPORT_SYMBOL_GPL(stmmac_resume);
7919
7920 #ifndef MODULE
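/* Parse the "stmmaceth=" kernel command line option. For example (values
 * here are purely illustrative):
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 * Each recognised option overrides the corresponding module parameter.
 */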
7921 static int __init stmmac_cmdline_opt(char *str)
7922 {
7923 char *opt;
7924
7925 if (!str || !*str)
7926 return 1;
7927 while ((opt = strsep(&str, ",")) != NULL) {
7928 if (!strncmp(opt, "debug:", 6)) {
7929 if (kstrtoint(opt + 6, 0, &debug))
7930 goto err;
7931 } else if (!strncmp(opt, "phyaddr:", 8)) {
7932 if (kstrtoint(opt + 8, 0, &phyaddr))
7933 goto err;
7934 } else if (!strncmp(opt, "buf_sz:", 7)) {
7935 if (kstrtoint(opt + 7, 0, &buf_sz))
7936 goto err;
7937 } else if (!strncmp(opt, "tc:", 3)) {
7938 if (kstrtoint(opt + 3, 0, &tc))
7939 goto err;
7940 } else if (!strncmp(opt, "watchdog:", 9)) {
7941 if (kstrtoint(opt + 9, 0, &watchdog))
7942 goto err;
7943 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7944 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7945 goto err;
7946 } else if (!strncmp(opt, "pause:", 6)) {
7947 if (kstrtoint(opt + 6, 0, &pause))
7948 goto err;
7949 } else if (!strncmp(opt, "eee_timer:", 10)) {
7950 if (kstrtoint(opt + 10, 0, &eee_timer))
7951 goto err;
7952 } else if (!strncmp(opt, "chain_mode:", 11)) {
7953 if (kstrtoint(opt + 11, 0, &chain_mode))
7954 goto err;
7955 }
7956 }
7957 return 1;
7958
7959 err:
7960 pr_err("%s: ERROR broken module parameter conversion", __func__);
7961 return 1;
7962 }
7963
7964 __setup("stmmaceth=", stmmac_cmdline_opt);
7965 #endif /* MODULE */
7966
7967 static int __init stmmac_init(void)
7968 {
7969 #ifdef CONFIG_DEBUG_FS
7970 /* Create debugfs main directory if it doesn't exist yet */
7971 if (!stmmac_fs_dir)
7972 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7973 register_netdevice_notifier(&stmmac_notifier);
7974 #endif
7975
7976 return 0;
7977 }
7978
7979 static void __exit stmmac_exit(void)
7980 {
7981 #ifdef CONFIG_DEBUG_FS
7982 unregister_netdevice_notifier(&stmmac_notifier);
7983 debugfs_remove_recursive(stmmac_fs_dir);
7984 #endif
7985 }
7986
7987 module_init(stmmac_init)
7988 module_exit(stmmac_exit)
7989
7990 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7991 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7992 MODULE_LICENSE("GPL");
7993