// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* Gateway CPU agent block (GWCA) */
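/* Mode transitions require the agent clock to be running; the clock is gated
 * again once the agent has been put back into the DISABLE state.
 */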
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}
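
/* Ring bookkeeping helpers: "cur" is the next entry the driver will touch and
 * "dirty" the oldest entry not yet reclaimed; both indexes wrap at ring_size.
 */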
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}
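
/* Lay out a queue's descriptor chain and terminate it with a DT_LINKFIX entry
 * that points back to the head, so the hardware keeps cycling the ring.
 */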
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
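
/* Same layout as rswitch_gwca_queue_format() above, but for RX rings that use
 * the extended timestamp descriptors (note the extra GWDCC_ETS bit).
 */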
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}
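
/* The RX DMAC helpers mirror the TX ones above, but use the extended
 * timestamp descriptor format so received frames carry hardware timestamps.
 */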
static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}
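
/* Reap completed RX descriptors up to the NAPI quota, then refill the ring
 * with fresh skbs. Returns true when the quota was exhausted, i.e. more work
 * may be pending.
 */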
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	napi_complete(napi);

	rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}
static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}
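
/* Walk the timestamp descriptor ring and match each entry against the pending
 * TX skbs by (port, tag); on a match the hardware timestamp is delivered via
 * skb_tstamp_tx() and the entry is dropped from ts_info_list.
 */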
static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
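
/* Clause 45 MDIO access via MPSM: an address phase is issued first, then the
 * read or write phase, polling the MMIS1 completion flags after each phase.
 */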
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
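
/* Register a C45-only MDIO bus for this port, populated from the "mdio"
 * subnode of the port's device tree node.
 */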
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	/* Current hardware has a restriction not to change speed at runtime */
	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	/* Current hardware has a restriction not to change speed at runtime */
	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}
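
/* Connect the port's PHY and trim the advertised link modes down to the fixed
 * port speed, since the hardware cannot change speed at runtime (see above).
 */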
static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}
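
/* The timestamp interrupt is shared by all ports, so it is enabled when the
 * first port is opened and disabled again when the last one is stopped.
 */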
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}

static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	int ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info) {
			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}
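
/* SIOCGHWTSTAMP / SIOCSHWTSTAMP handlers; the timestamping state lives in the
 * shared gPTP instance, so the configuration applies to all ports at once.
 */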
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}

static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;
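
	/* Attach one RX and one TX descriptor queue (DMAC) to this port */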
	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}
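
/* Note: the 40-bit DMA mask is tried first, presumably to match the 8-bit
 * upper-address field of the descriptors (see rswitch_desc_set_dptr()); the
 * device is kept runtime-resumed from probe until remove.
 */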
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");