// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static void rswitch_gwca_set_rate_limit(struct rswitch_private *priv, int rate)
{
	u32 gwgrlulc, gwgrlc;

	switch (rate) {
	case 1000:
		gwgrlulc = 0x0000005f;
		gwgrlc = 0x00010260;
		break;
	default:
		dev_err(&priv->pdev->dev, "%s: This rate is not supported (%d)\n", __func__, rate);
		return;
	}

	iowrite32(gwgrlulc, priv->addr + GWGRLULC);
	iowrite32(gwgrlc, priv->addr + GWGRLC);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}
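/*
 * Software view of a descriptor ring: "cur" is the next entry the
 * driver will use, "dirty" the oldest entry not yet reclaimed, and the
 * helpers below wrap both modulo ring_size (e.g. with ring_size = 8,
 * advancing cur = 6 by num = 3 yields (6 + 3) % 8 = 1).
 */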
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		/* rswitch_gwca_queue_free() tolerates a NULL rx_ring */
		if (rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size) < 0)
			goto out;

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}
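/*
 * All rings are allocated with one extra descriptor: the format
 * helpers below turn that last entry into a DT_LINKFIX descriptor
 * pointing back at the head of the ring, so the ring is a closed loop
 * from the DMA engine's point of view.
 */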
static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	return !gq->ts_ring ? -ENOMEM : 0;
}

static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
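/*
 * Note the publish order in rswitch_gwca_queue_ext_ts_fill() above:
 * the buffer address and size are made visible with dma_wmb() before
 * die_dt is set to DT_FEMPTY, since (as far as this driver is
 * concerned) the hardware owns a descriptor as soon as the type field
 * says so.
 */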
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DQT : 0) | GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true,
				       TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false,
				       RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
	rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}
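/*
 * RX is NAPI-driven: rswitch_rx() below consumes at most *quota
 * completed descriptors, then re-arms every slot it drained (fresh skb
 * plus a DT_FEMPTY descriptor) before advancing the dirty pointer.
 */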
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		if (--boguscnt < 0)
			break;
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	napi_complete(napi);

	rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		__napi_schedule(&rdev->napi);
	}
}
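/*
 * While a port is being polled, its per-queue data interrupts stay
 * masked: rswitch_queue_interrupt() disables them before scheduling
 * NAPI, and rswitch_poll() re-enables them once the rings are drained.
 */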
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}
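/*
 * TX timestamps arrive asynchronously on the dedicated TS descriptor
 * queue. Each completion carries the port number and the tag that was
 * written into the TX descriptor, which rswitch_ts() above uses to
 * locate the matching entry in gwca.ts_info_list.
 */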
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
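/*
 * MDIO access is Clause 45 only: rswitch_etha_set_access() below first
 * issues an address frame through MPSM and waits for MMIS1_PAACS, then
 * issues the read or write frame and polls the corresponding
 * completion flag (MMIS1_PRACS or MMIS1_PWACS).
 */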
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
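/*
 * Each ETHA port is described by a child of the "ethernet-ports" DT
 * node whose "reg" property matches the port index. "max-speed" is
 * optional; without it the speed defaults per phy-mode as above
 * (MII: 100M, SGMII: 1G, USXGMII: 2.5G).
 */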
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	/* Current hardware has a restriction not to change speed at runtime */
	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	/* Current hardware has a restriction not to change speed at runtime */
	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev) {
		phy_disconnect(rdev->ndev->phydev);
		rdev->ndev->phydev = NULL;
	}
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}
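/*
 * In rswitch_open()/rswitch_stop() below, data interrupts are switched
 * per queue, while the timestamp interrupt (GWCA_TS_IRQ_BIT) is shared
 * across ports: it is enabled on open and disabled again on stop.
 */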
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);

	iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;

	netif_tx_stop_all_queues(ndev);

	iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}

static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	int ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return ret;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info) {
			/* ndo_start_xmit() must not return an errno; drop the packet */
			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
			gq->skbs[gq->cur] = NULL;
			dev_kfree_skb_any(skb);
			return ret;
		}

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}
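/*
 * Hardware timestamping ioctls: rswitch_hwstamp_set() below maps any
 * RX filter other than "none" or "PTP v2 L2 event" to
 * HWTSTAMP_FILTER_ALL and reports that choice back to user space.
 */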
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}

static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}
static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");