// SPDX-License-Identifier: GPL-2.0-or-later

/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define SC_PPE_RESET_DREQ		0x026C

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210

#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR		0x6D0
#define PPE_CFG_MAX_FRAME_LEN_REG	0x500
#define PPE_CFG_RX_PKT_MODE_REG		0x504
#define PPE_CFG_QOS_VMID_GEN		0x520
#define PPE_CFG_RX_PKT_INT		0x740
#define PPE_INTEN			0x700
#define PPE_INTSTS			0x708
#define PPE_RINT			0x704
#define PPE_CFG_STS_MODE		0x880
#else
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_HIS_RX_PKT_CNT		0x804

#define RESET_DREQ_ALL			0xffffffff

/* REG_INTERRUPT */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#if defined(CONFIG_HI13X1_GMAC)
#define TX_CLEAR_WB			BIT(7)
#define TX_RELEASE_TO_PPE		BIT(4)
#define TX_FINISH_CACHE_INV		BIT(6)
#define TX_POOL_SHIFT			16
#else
#define TX_CLEAR_WB			BIT(4)
#define TX_FINISH_CACHE_INV		BIT(2)
#endif
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

/* RX error */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)

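/* The HI13X1 GMAC variant has a different PPE register layout and
 * programs buffer addresses and sizes in cache-line (64 byte) units,
 * so several bit positions and shifts below depend on
 * CONFIG_HI13X1_GMAC.
 */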
#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	4
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	7
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(0)
#define PPE_CFG_QOS_VMID_MODE		BIT(15)
#define PPE_CFG_BUS_LOCAL_REL		(BIT(9) | BIT(15) | BIT(19) | BIT(23))

/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT		6
#define PPE_TX_BUF_HOLD			BIT(31)
#define CACHE_LINE_MASK			0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11
#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_BUS_LOCAL_REL		BIT(14)

/* buf unit size is 1, so the shift is 0 */
#define PPE_BUF_SIZE_SHIFT		0
#define PPE_TX_BUF_HOLD			0
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0

#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100

struct tx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[2];
	u32 send_addr;
	u16 send_size;
	u16 data_offset;
	u32 reserved2[7];
	u32 cfg;
	u32 wb_addr;
	u32 reserved3[3];
#else
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
#endif
} __aligned(64);

struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[3];
	u16 pkt_len;
	u16 reserved_16;
	u32 reserved2[6];
	u32 pkt_err;
	u32 reserved3[5];
#else
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
#endif
};

struct hip04_priv {
	void __iomem *base;
#if defined(CONFIG_HI13X1_GMAC)
	void __iomem *sysctrl_base;
#endif
	int phy_mode;
	int chan;
	unsigned int port;
	unsigned int group;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct device *dev;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};

static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % TX_DESC_NUM;
}

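/* Program the MAC for the current link: select the SGMII or MII speed
 * encoding, set the duplex bit and latch the new mode via
 * GE_MODE_CHANGE_REG.
 */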
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "not supported mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
	writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}

static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}

static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->group);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}

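/* Enable TX/RX at the MAC, clear any stale receive interrupt, program
 * the RX interrupt threshold/timeout and unmask the default interrupt
 * set.  hip04_mac_disable() below undoes this.
 */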
static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}

static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
	writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}

static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}

static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}

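/* Queue one skb for transmission: map it for DMA, fill the next TX
 * descriptor and hand its address to the PPE.  Completed descriptors
 * are reclaimed lazily from the NAPI poll or the coalesce timer, with
 * BQL accounting via netdev_sent_queue()/netdev_completed_queue().
 */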
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;

	desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
	desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
	desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
#else
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = (__force u32)cpu_to_be32(phys +
				offsetof(struct tx_desc, send_addr));
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}

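/* NAPI poll handler: reclaim completed TX descriptors first, then pull
 * received frames out of the RX ring.  Every consumed buffer is
 * replaced with a freshly mapped fragment so the hardware never runs
 * out of RX descriptors.
 */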
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	/* clean up tx descriptors */
	tx_remaining = hip04_tx_reclaim(ndev, false);

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu((__force __be16)desc->pkt_len);
		err = be32_to_cpu((__force __be32)desc->pkt_err);

		if (0 == len) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(priv->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* start a new timer if necessary */
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}

static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(priv->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}
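
/* Tear down in roughly the reverse order of open: stop NAPI and the
 * queue, disable the MAC, reclaim any in-flight TX buffers and unmap
 * the RX DMA buffers so the ring can be reprogrammed on the next open.
 */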
static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(priv->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}

static void hip04_timeout(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}

static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}

static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	/* Check not supported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static const struct ethtool_ops hip04_ethtool_ops = {
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};

static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};

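/* Allocate the DMA-coherent TX descriptor ring and the page-fragment
 * RX buffers.  Each RX buffer reserves room for the shared skb info so
 * the fragment can later be handed to build_skb() in the poll loop.
 */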
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}

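/* Probe: map the PPE (and, on HI13X1, the sysctrl) registers, resolve
 * port/channel/group from the "port-handle" phandle, set up the TX
 * coalesce timer, connect the PHY if one is described in the DT and
 * register the net_device.
 */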
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = d;
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

#if defined(CONFIG_HI13X1_GMAC)
	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->sysctrl_base)) {
		ret = PTR_ERR(priv->sysctrl_base);
		goto init_fail;
	}
#endif

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;
	priv->group = arg.args[2];

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	priv->phy_mode = of_get_phy_mode(node);
	if (priv->phy_mode < 0) {
		dev_warn(d, "not find phy-mode\n");
		ret = -EINVAL;
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_dreq(priv);
	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	eth_random_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	free_irq(ndev->irq, ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");