// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0),
		  priv->addr + FWPBFC(priv->gwca.index));
}

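/* Summary of the MFWD programming above (derived from this driver, not the
 * hardware manual): FWPBFCSDC() points each ETHA port at the RX descriptor
 * queue owned by its netdev, and FWPBFC() restricts forwarding so that port
 * traffic goes only to the GWCA agent, while the GWCA itself may forward to
 * all ports.
 */
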
/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

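/* Ring bookkeeping: "cur" is the next entry to be produced or consumed and
 * "dirty" the oldest entry not yet reclaimed; both wrap modulo ring_size.
 * Worked example (illustrative values only): with ring_size = 8, cur = 1 and
 * dirty = 6, rswitch_get_num_cur_queues() returns 8 - 6 + 1 = 3 in-flight
 * entries.
 */
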
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		/* Propagate skb allocation failures here; the ring format
		 * step would otherwise dereference a NULL skb.
		 */
		if (rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size) < 0)
			goto out;

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	return !gq->ts_ring ? -ENOMEM : 0;
}

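/* Descriptors carry up to 40-bit bus addresses: the low 32 bits go in dptrl
 * and the next 8 bits in dptrh, which matches the DMA_BIT_MASK(40) set in
 * renesas_eth_sw_probe().
 */
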
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

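/* Each descriptor ring is allocated with ring_size + 1 entries; the extra
 * entry is written as DT_LINKFIX pointing back at the ring base so the
 * hardware cycles through the ring, and the per-queue entry in the linkfix
 * table gives it the initial base address.
 */
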
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

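/* RX flow in brief: completed entries (die_dt no longer DT_FEMPTY) are
 * unmapped and passed to napi_gro_receive(), optionally stamped from the
 * descriptor's ts_sec/ts_nsec fields; afterwards the dirty region is
 * refilled with fresh skbs and re-armed as DT_FEMPTY. A refill failure is
 * treated as fatal and halts the GWCA.
 */
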
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return false;
}

static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	napi_complete(napi);

	rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		__napi_schedule(&rdev->napi);
	}
}

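/* One GWCA interrupt covers all data queues: the handler reads the masked
 * status words and, for every set bit, acknowledges the queue and kicks the
 * owning netdev's NAPI context with the queue interrupts disabled until
 * rswitch_poll() re-enables them.
 */
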
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

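/* The ETHA agents below follow the same DISABLE -> CONFIG -> OPERATION mode
 * sequence as the GWCA, with the agent clock gated around mode changes in
 * the same way.
 */
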
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

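/* Clause 45 MDIO is done in two phases through MPSM: an address phase
 * (MPSM_PSME with the register address in bits 16+), then a read or write
 * phase selected by the "pop" opcode. Completion of each phase is polled
 * via the MMIS1 PAACS/PRACS/PWACS flags, which are cleared by writing them
 * back, as rswitch_etha_set_access() does.
 */
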
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			/* Drop the iterator's reference before discarding
			 * the node, otherwise its refcount leaks.
			 */
			of_node_put(port);
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port)) {
				of_node_put(port);
				port = NULL;
			}
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

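/* Each ETHA gets its own C45-only MII bus, registered from the "mdio"
 * subnode of the port's DT node, so PHYs are bound per port rather than
 * through one shared bus.
 */
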
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	/* Current hardware has a restriction not to change speed at runtime */
	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	/* Current hardware has a restriction not to change speed at runtime */
	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}

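/* The timestamp FIFO interrupt (GWTSDIE/GWTSDID) is shared by all ports, so
 * it is enabled only when the first port opens and disabled again when the
 * last one closes, tracked via the opened_ports bitmap.
 */
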
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}

static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	int ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info) {
			/* An ndo_start_xmit handler must not return an errno:
			 * drop the packet and report NETDEV_TX_OK instead of
			 * -ENOMEM.
			 */
			gq->skbs[gq->cur] = NULL;
			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return ret;
		}

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

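/* Note: rx_filter values other than NONE and V2_L2_EVENT are coerced to
 * HWTSTAMP_FILTER_ALL below, and the adjusted config is copied back to
 * user space, which the hwtstamp ioctl contract allows.
 */
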
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}

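/* Each switch port becomes its own "tsn%d" netdev with one TX and one RX
 * GWCA queue; the MAC address comes from DT when available, otherwise from
 * the MRMAC registers (e.g. as left by the bootloader), else it is
 * randomized.
 */
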
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return err;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

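/* Probe order: map the "secure_base" register block, prefer a 40-bit DMA
 * mask (falling back to 32-bit), allocate the GWCA queue table, then bring
 * the whole switch up under runtime PM via rswitch_init().
 */
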
"invalid resource\n"); 1858 return -EINVAL; 1859 } 1860 1861 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 1862 if (!priv) 1863 return -ENOMEM; 1864 1865 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); 1866 if (!priv->ptp_priv) 1867 return -ENOMEM; 1868 1869 platform_set_drvdata(pdev, priv); 1870 priv->pdev = pdev; 1871 priv->addr = devm_ioremap_resource(&pdev->dev, res); 1872 if (IS_ERR(priv->addr)) 1873 return PTR_ERR(priv->addr); 1874 1875 priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; 1876 1877 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 1878 if (ret < 0) { 1879 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1880 if (ret < 0) 1881 return ret; 1882 } 1883 1884 priv->gwca.index = AGENT_INDEX_GWCA; 1885 priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, 1886 RSWITCH_MAX_NUM_QUEUES); 1887 priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, 1888 sizeof(*priv->gwca.queues), GFP_KERNEL); 1889 if (!priv->gwca.queues) 1890 return -ENOMEM; 1891 1892 pm_runtime_enable(&pdev->dev); 1893 pm_runtime_get_sync(&pdev->dev); 1894 1895 ret = rswitch_init(priv); 1896 if (ret < 0) { 1897 pm_runtime_put(&pdev->dev); 1898 pm_runtime_disable(&pdev->dev); 1899 return ret; 1900 } 1901 1902 device_set_wakeup_capable(&pdev->dev, 1); 1903 1904 return ret; 1905 } 1906 1907 static void rswitch_deinit(struct rswitch_private *priv) 1908 { 1909 int i; 1910 1911 rswitch_gwca_hw_deinit(priv); 1912 rcar_gen4_ptp_unregister(priv->ptp_priv); 1913 1914 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 1915 struct rswitch_device *rdev = priv->rdev[i]; 1916 1917 phy_exit(priv->rdev[i]->serdes); 1918 rswitch_ether_port_deinit_one(rdev); 1919 unregister_netdev(rdev->ndev); 1920 rswitch_device_free(priv, i); 1921 } 1922 1923 rswitch_gwca_ts_queue_free(priv); 1924 rswitch_gwca_linkfix_free(priv); 1925 1926 rswitch_clock_disable(priv); 1927 } 1928 1929 static int renesas_eth_sw_remove(struct platform_device *pdev) 1930 { 1931 struct rswitch_private *priv = platform_get_drvdata(pdev); 1932 1933 rswitch_deinit(priv); 1934 1935 pm_runtime_put(&pdev->dev); 1936 pm_runtime_disable(&pdev->dev); 1937 1938 platform_set_drvdata(pdev, NULL); 1939 1940 return 0; 1941 } 1942 1943 static struct platform_driver renesas_eth_sw_driver_platform = { 1944 .probe = renesas_eth_sw_probe, 1945 .remove = renesas_eth_sw_remove, 1946 .driver = { 1947 .name = "renesas_eth_sw", 1948 .of_match_table = renesas_eth_sw_of_table, 1949 } 1950 }; 1951 module_platform_driver(renesas_eth_sw_driver_platform); 1952 MODULE_AUTHOR("Yoshihiro Shimoda"); 1953 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver"); 1954 MODULE_LICENSE("GPL"); 1955