// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "rswitch.h"

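/* Poll a register until (value & mask) == expected. Returns 0 on success or
 * -ETIMEDOUT if the condition is not met within RSWITCH_TIMEOUT_US.
 */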
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

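/* Per-agent clocks are enabled by setting the port bit in RCEC (together
 * with RCEC_RCE) and disabled by setting the corresponding bit in RCDC.
 */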
static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* gPTP timer (gPTP) */
static void rswitch_get_timestamp(struct rswitch_private *priv,
				  struct timespec64 *ts)
{
	priv->ptp_priv->info.gettime64(&priv->ptp_priv->info, ts);
}

/* Gateway CPU agent block (GWCA) */
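/* Request a GWCA operating mode via GWMC and wait until GWMS reflects it.
 * The agent clock must be running while the transition is in progress; it
 * is switched off again once the agent has been put into DISABLE mode.
 */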
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static void rswitch_gwca_set_rate_limit(struct rswitch_private *priv, int rate)
{
	u32 gwgrlulc, gwgrlc;

	switch (rate) {
	case 1000:
		gwgrlulc = 0x0000005f;
		gwgrlc = 0x00010260;
		break;
	default:
		dev_err(&priv->pdev->dev, "%s: This rate is not supported (%d)\n", __func__, rate);
		return;
	}

	iowrite32(gwgrlulc, priv->addr + GWGRLULC);
	iowrite32(gwgrlc, priv->addr + GWGRLC);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

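/* Ring bookkeeping: gq->cur is the next entry software will use and
 * gq->dirty is the oldest entry not yet reclaimed; both advance modulo
 * gq->ring_size.
 */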
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (gq->gptp) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
		gq->ts_ring = NULL;
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->ring, gq->ring_dma);
		gq->ring = NULL;
	}

	if (!gq->dir_tx) {
		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

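/* Allocate a queue's skb array and descriptor memory. One descriptor more
 * than ring_size is allocated; the extra entry is later set up as a
 * DT_LINKFIX descriptor pointing back at the head, so the hardware sees a
 * circular ring.
 */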
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, bool gptp, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->gptp = gptp;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		/* Propagate skb allocation failures instead of ignoring them */
		if (rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size) < 0)
			goto out;
	}

	if (gptp)
		gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	else
		gq->ring = dma_alloc_coherent(ndev->dev.parent,
					      sizeof(struct rswitch_ext_desc) *
					      (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	if (!gq->ts_ring && !gq->ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

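/* Descriptors carry 40-bit DMA addresses: the low 32 bits are stored
 * little-endian in dptrl and the top 8 bits in the dptrh byte, matching
 * the DMA_BIT_MASK(40) requested at probe time.
 */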
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

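/* Format a ring of ext descriptors: map RX buffers into DT_FEMPTY entries
 * (TX entries start as DT_EEMPTY), terminate with a DT_LINKFIX descriptor
 * back to the head, hook the ring into the linkfix table and enable it via
 * GWDCC.
 */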
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->ring, 0, tx_ring_size);
	for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
				      struct rswitch_gwca_queue *gq,
				      int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->ts_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
					struct rswitch_private *priv,
					struct rswitch_gwca_queue *gq)
{
	int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->ts_ring, 0, tx_ts_ring_size);
	err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->ts_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_desc_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct device *dev = &priv->pdev->dev;

	priv->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	priv->linkfix_table = dma_alloc_coherent(dev, priv->linkfix_table_size,
						 &priv->linkfix_table_dma, GFP_KERNEL);
	if (!priv->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		priv->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_desc_free(struct rswitch_private *priv)
{
	if (priv->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, priv->linkfix_table_size,
				  priv->linkfix_table, priv->linkfix_table_dma);
	priv->linkfix_table = NULL;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
				       TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
				       RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC0);
	rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

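/* Receive up to *quota frames from the RX ring: unmap each filled buffer,
 * attach a hardware timestamp when RX timestamping is enabled, pass the skb
 * up the stack, then refill and re-arm the consumed descriptors. Returns
 * true when the budget was used up, i.e. more work may be pending.
 */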
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		if (--boguscnt < 0)
			break;
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

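/* Reclaim transmitted descriptors: complete TX timestamps, unmap buffers,
 * free skbs and hand entries back as DT_EEMPTY. With free_txed_only, stop
 * at the first entry the hardware has not yet marked DT_FEMPTY.
 */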
static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct timespec64 ts;

				rswitch_get_timestamp(rdev->priv, &ts);
				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
				skb_tstamp_tx(skb, &shhwtstamps);
			}
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

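/* NAPI poll: reclaim TX, then receive until the quota is spent. Data
 * interrupts for both queues stay masked while polling and are re-enabled
 * only after napi_complete().
 */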
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	napi_complete(napi);

	rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >>  8) & 0xFF;
	mac[1] = (mrmac0 >>  0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >>  8) & 0xFF;
	mac[5] = (mrmac1 >>  0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

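/* Clause 45 MDIO access: program the address phase via MPSM and wait for
 * MMIS1_PAACS, then issue the read/write phase and wait for the matching
 * completion flag (PRACS/PWACS). The completion flags appear to be
 * write-1-to-clear, hence the rswitch_modify() calls.
 */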
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read(struct mii_bus *bus, int addr, int regnum)
{
	struct rswitch_etha *etha = bus->priv;
	int mode, devad, regad;

	mode = regnum & MII_ADDR_C45;
	devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
	regad = regnum & MII_REGADDR_C45_MASK;

	/* Clause 22 access is not supported */
	if (!mode)
		return -EOPNOTSUPP;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	struct rswitch_etha *etha = bus->priv;
	int mode, devad, regad;

	mode = regnum & MII_ADDR_C45;
	devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
	regad = regnum & MII_REGADDR_C45_MASK;

	/* Clause 22 access is not supported */
	if (!mode)
		return -EOPNOTSUPP;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* The caller must call of_node_put(port) when done with the node */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

/* The caller must call of_node_put(mdio) when done with the node */
static struct device_node *rswitch_get_mdio_node(struct rswitch_device *rdev)
{
	struct device_node *port, *mdio;

	port = rswitch_get_port_node(rdev);
	if (!port)
		return NULL;

	mdio = of_get_child_by_name(port, "mdio");
	of_node_put(port);

	return mdio;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	struct device_node *port;
	int err;

	port = rswitch_get_port_node(rdev);
	if (!port)
		return 0;	/* ignored */

	err = of_get_phy_mode(port, &rdev->etha->phy_interface);
	of_node_put(port);

	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read = rswitch_etha_mii_read;
	mii_bus->write = rswitch_etha_mii_write;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = rswitch_get_mdio_node(rdev);
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
}

static void rswitch_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
}

static void rswitch_mac_link_up(struct phylink_config *config,
				struct phy_device *phydev, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	/* Current hardware cannot change speed at runtime */
}

static const struct phylink_mac_ops rswitch_phylink_ops = {
	.mac_config = rswitch_mac_config,
	.mac_link_down = rswitch_mac_link_down,
	.mac_link_up = rswitch_mac_link_up,
};

static int rswitch_phylink_init(struct rswitch_device *rdev)
{
	struct device_node *port;
	struct phylink *phylink;
	int err;

	port = rswitch_get_port_node(rdev);
	if (!port)
		return -ENODEV;

	rdev->phylink_config.dev = &rdev->ndev->dev;
	rdev->phylink_config.type = PHYLINK_NETDEV;
	__set_bit(PHY_INTERFACE_MODE_SGMII, rdev->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_USXGMII, rdev->phylink_config.supported_interfaces);
	rdev->phylink_config.mac_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;

	phylink = phylink_create(&rdev->phylink_config, &port->fwnode,
				 rdev->etha->phy_interface, &rswitch_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto out;
	}

	rdev->phylink = phylink;
	err = phylink_of_phy_connect(rdev->phylink, port, rdev->etha->phy_interface);
out:
	of_node_put(port);

	return err;
}

static void rswitch_phylink_deinit(struct rswitch_device *rdev)
{
	rtnl_lock();
	phylink_disconnect_phy(rdev->phylink);
	rtnl_unlock();
	phylink_destroy(rdev->phylink);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	struct device_node *port = rswitch_get_port_node(rdev);
	struct phy *serdes;
	int err;

	serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
	of_node_put(port);
	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(serdes, rdev->etha->speed);
}

static int rswitch_serdes_init(struct rswitch_device *rdev)
{
	struct device_node *port = rswitch_get_port_node(rdev);
	struct phy *serdes;

	serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
	of_node_put(port);
	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	return phy_init(serdes);
}

static int rswitch_serdes_deinit(struct rswitch_device *rdev)
{
	struct device_node *port = rswitch_get_port_node(rdev);
	struct phy *serdes;

	serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
	of_node_put(port);
	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	return phy_exit(serdes);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phylink_init(rdev);
	if (err < 0)
		goto err_phylink_init;

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
	rswitch_phylink_deinit(rdev);

err_phylink_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phylink_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_serdes_init(priv->rdev[i]);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_serdes_deinit(priv->rdev[i]);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		rswitch_serdes_deinit(priv->rdev[i]);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}

static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	phylink_start(rdev->phylink);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	netif_tx_stop_all_queues(ndev);

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);

	phylink_stop(rdev->phylink);
	napi_disable(&rdev->napi);

	return 0;
}

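/* Transmit one frame as a single DT_FSINGLE descriptor. The descriptor
 * fields must be visible before die_dt hands ownership to hardware
 * (dma_wmb), and gq->cur may only advance after that (wmb), before the
 * transmit request is kicked via GWTRC.
 */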
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	int ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		/* The skb has not been consumed; let the stack requeue it
		 * rather than silently leaking it.
		 */
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
	}
	skb_tx_timestamp(skb);

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phylink_mii_ioctl(rdev->phylink, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}

static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct device_node *port;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	port = rswitch_get_port_node(rdev);
	rdev->disabled = !port;
	err = of_get_ethdev_address(port, ndev);
	of_node_put(port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	err = rswitch_gwca_desc_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_desc_free(priv);

	return err;
}

static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		rswitch_serdes_deinit(rdev);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_desc_free(priv);

	rswitch_clock_disable(priv);
}

static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");