1 // SPDX-License-Identifier: GPL-2.0+ 2 3 #include <linux/types.h> 4 #include <linux/clk.h> 5 #include <linux/platform_device.h> 6 #include <linux/pm_runtime.h> 7 #include <linux/acpi.h> 8 #include <linux/of_mdio.h> 9 #include <linux/of_net.h> 10 #include <linux/etherdevice.h> 11 #include <linux/interrupt.h> 12 #include <linux/io.h> 13 #include <linux/netlink.h> 14 #include <linux/bpf.h> 15 #include <linux/bpf_trace.h> 16 17 #include <net/tcp.h> 18 #include <net/page_pool.h> 19 #include <net/ip6_checksum.h> 20 21 #define NETSEC_REG_SOFT_RST 0x104 22 #define NETSEC_REG_COM_INIT 0x120 23 24 #define NETSEC_REG_TOP_STATUS 0x200 25 #define NETSEC_IRQ_RX BIT(1) 26 #define NETSEC_IRQ_TX BIT(0) 27 28 #define NETSEC_REG_TOP_INTEN 0x204 29 #define NETSEC_REG_INTEN_SET 0x234 30 #define NETSEC_REG_INTEN_CLR 0x238 31 32 #define NETSEC_REG_NRM_TX_STATUS 0x400 33 #define NETSEC_REG_NRM_TX_INTEN 0x404 34 #define NETSEC_REG_NRM_TX_INTEN_SET 0x428 35 #define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c 36 #define NRM_TX_ST_NTOWNR BIT(17) 37 #define NRM_TX_ST_TR_ERR BIT(16) 38 #define NRM_TX_ST_TXDONE BIT(15) 39 #define NRM_TX_ST_TMREXP BIT(14) 40 41 #define NETSEC_REG_NRM_RX_STATUS 0x440 42 #define NETSEC_REG_NRM_RX_INTEN 0x444 43 #define NETSEC_REG_NRM_RX_INTEN_SET 0x468 44 #define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c 45 #define NRM_RX_ST_RC_ERR BIT(16) 46 #define NRM_RX_ST_PKTCNT BIT(15) 47 #define NRM_RX_ST_TMREXP BIT(14) 48 49 #define NETSEC_REG_PKT_CMD_BUF 0xd0 50 51 #define NETSEC_REG_CLK_EN 0x100 52 53 #define NETSEC_REG_PKT_CTRL 0x140 54 55 #define NETSEC_REG_DMA_TMR_CTRL 0x20c 56 #define NETSEC_REG_F_TAIKI_MC_VER 0x22c 57 #define NETSEC_REG_F_TAIKI_VER 0x230 58 #define NETSEC_REG_DMA_HM_CTRL 0x214 59 #define NETSEC_REG_DMA_MH_CTRL 0x220 60 #define NETSEC_REG_ADDR_DIS_CORE 0x218 61 #define NETSEC_REG_DMAC_HM_CMD_BUF 0x210 62 #define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c 63 64 #define NETSEC_REG_NRM_TX_PKTCNT 0x410 65 66 #define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414 67 #define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418 68 69 #define NETSEC_REG_NRM_TX_TMR 0x41c 70 71 #define NETSEC_REG_NRM_RX_PKTCNT 0x454 72 #define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458 73 #define NETSEC_REG_NRM_TX_TXINT_TMR 0x420 74 #define NETSEC_REG_NRM_RX_RXINT_TMR 0x460 75 76 #define NETSEC_REG_NRM_RX_TMR 0x45c 77 78 #define NETSEC_REG_NRM_TX_DESC_START_UP 0x434 79 #define NETSEC_REG_NRM_TX_DESC_START_LW 0x408 80 #define NETSEC_REG_NRM_RX_DESC_START_UP 0x474 81 #define NETSEC_REG_NRM_RX_DESC_START_LW 0x448 82 83 #define NETSEC_REG_NRM_TX_CONFIG 0x430 84 #define NETSEC_REG_NRM_RX_CONFIG 0x470 85 86 #define MAC_REG_STATUS 0x1024 87 #define MAC_REG_DATA 0x11c0 88 #define MAC_REG_CMD 0x11c4 89 #define MAC_REG_FLOW_TH 0x11cc 90 #define MAC_REG_INTF_SEL 0x11d4 91 #define MAC_REG_DESC_INIT 0x11fc 92 #define MAC_REG_DESC_SOFT_RST 0x1204 93 #define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500 94 95 #define GMAC_REG_MCR 0x0000 96 #define GMAC_REG_MFFR 0x0004 97 #define GMAC_REG_GAR 0x0010 98 #define GMAC_REG_GDR 0x0014 99 #define GMAC_REG_FCR 0x0018 100 #define GMAC_REG_BMR 0x1000 101 #define GMAC_REG_RDLAR 0x100c 102 #define GMAC_REG_TDLAR 0x1010 103 #define GMAC_REG_OMR 0x1018 104 105 #define MHZ(n) ((n) * 1000 * 1000) 106 107 #define NETSEC_TX_SHIFT_OWN_FIELD 31 108 #define NETSEC_TX_SHIFT_LD_FIELD 30 109 #define NETSEC_TX_SHIFT_DRID_FIELD 24 110 #define NETSEC_TX_SHIFT_PT_FIELD 21 111 #define NETSEC_TX_SHIFT_TDRID_FIELD 16 112 #define NETSEC_TX_SHIFT_CC_FIELD 15 113 #define NETSEC_TX_SHIFT_FS_FIELD 9 114 #define NETSEC_TX_LAST 8 115 #define NETSEC_TX_SHIFT_CO 7 116 #define NETSEC_TX_SHIFT_SO 6 117 #define NETSEC_TX_SHIFT_TRS_FIELD 4 118 119 #define NETSEC_RX_PKT_OWN_FIELD 31 120 #define NETSEC_RX_PKT_LD_FIELD 30 121 #define NETSEC_RX_PKT_SDRID_FIELD 24 122 #define NETSEC_RX_PKT_FR_FIELD 23 123 #define NETSEC_RX_PKT_ER_FIELD 21 124 #define NETSEC_RX_PKT_ERR_FIELD 16 125 #define NETSEC_RX_PKT_TDRID_FIELD 12 126 #define NETSEC_RX_PKT_FS_FIELD 9 127 #define NETSEC_RX_PKT_LS_FIELD 8 128 #define NETSEC_RX_PKT_CO_FIELD 6 129 130 #define NETSEC_RX_PKT_ERR_MASK 3 131 132 #define NETSEC_MAX_TX_PKT_LEN 1518 133 #define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018 134 135 #define NETSEC_RING_GMAC 15 136 #define NETSEC_RING_MAX 2 137 138 #define NETSEC_TCP_SEG_LEN_MAX 1460 139 #define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960 140 141 #define NETSEC_RX_CKSUM_NOTAVAIL 0 142 #define NETSEC_RX_CKSUM_OK 1 143 #define NETSEC_RX_CKSUM_NG 2 144 145 #define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20) 146 #define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4) 147 148 #define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20) 149 #define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19) 150 151 #define NETSEC_INT_PKTCNT_MAX 2047 152 153 #define NETSEC_FLOW_START_TH_MAX 95 154 #define NETSEC_FLOW_STOP_TH_MAX 95 155 #define NETSEC_FLOW_PAUSE_TIME_MIN 5 156 157 #define NETSEC_CLK_EN_REG_DOM_ALL 0x3f 158 159 #define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28) 160 #define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27) 161 #define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3) 162 #define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2) 163 #define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1) 164 #define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0) 165 166 #define NETSEC_CLK_EN_REG_DOM_G BIT(5) 167 #define NETSEC_CLK_EN_REG_DOM_C BIT(1) 168 #define NETSEC_CLK_EN_REG_DOM_D BIT(0) 169 170 #define NETSEC_COM_INIT_REG_DB BIT(2) 171 #define NETSEC_COM_INIT_REG_CLS BIT(1) 172 #define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \ 173 NETSEC_COM_INIT_REG_DB) 174 175 #define NETSEC_SOFT_RST_REG_RESET 0 176 #define NETSEC_SOFT_RST_REG_RUN BIT(31) 177 178 #define NETSEC_DMA_CTRL_REG_STOP 1 179 #define MH_CTRL__MODE_TRANS BIT(20) 180 181 #define NETSEC_GMAC_CMD_ST_READ 0 182 #define NETSEC_GMAC_CMD_ST_WRITE BIT(28) 183 #define NETSEC_GMAC_CMD_ST_BUSY BIT(31) 184 185 #define NETSEC_GMAC_BMR_REG_COMMON 0x00412080 186 #define NETSEC_GMAC_BMR_REG_RESET 0x00020181 187 #define NETSEC_GMAC_BMR_REG_SWR 0x00000001 188 189 #define NETSEC_GMAC_OMR_REG_ST BIT(13) 190 #define NETSEC_GMAC_OMR_REG_SR BIT(1) 191 192 #define NETSEC_GMAC_MCR_REG_IBN BIT(30) 193 #define NETSEC_GMAC_MCR_REG_CST BIT(25) 194 #define NETSEC_GMAC_MCR_REG_JE BIT(20) 195 #define NETSEC_MCR_PS BIT(15) 196 #define NETSEC_GMAC_MCR_REG_FES BIT(14) 197 #define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c 198 #define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c 199 200 #define NETSEC_FCR_RFE BIT(2) 201 #define NETSEC_FCR_TFE BIT(1) 202 203 #define NETSEC_GMAC_GAR_REG_GW BIT(1) 204 #define NETSEC_GMAC_GAR_REG_GB BIT(0) 205 206 #define NETSEC_GMAC_GAR_REG_SHIFT_PA 11 207 #define NETSEC_GMAC_GAR_REG_SHIFT_GR 6 208 #define GMAC_REG_SHIFT_CR_GAR 2 209 210 #define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2 211 #define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3 212 #define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0 213 #define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1 214 #define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4 215 #define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5 216 217 #define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000 218 #define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000 219 220 #define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000 221 222 #define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31) 223 #define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30) 224 #define NETSEC_REG_DESC_TMR_MODE 4 225 #define NETSEC_REG_DESC_ENDIAN 0 226 227 #define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1 228 #define NETSEC_MAC_DESC_INIT_REG_INIT 1 229 230 #define NETSEC_EEPROM_MAC_ADDRESS 0x00 231 #define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08 232 #define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C 233 #define NETSEC_EEPROM_HM_ME_SIZE 0x10 234 #define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14 235 #define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18 236 #define NETSEC_EEPROM_MH_ME_SIZE 0x1C 237 #define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20 238 #define NETSEC_EEPROM_PKT_ME_SIZE 0x24 239 240 #define DESC_NUM 256 241 242 #define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) 243 #define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \ 244 NET_IP_ALIGN) 245 #define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \ 246 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 247 #define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA) 248 249 #define DESC_SZ sizeof(struct netsec_de) 250 251 #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) 252 253 #define NETSEC_XDP_PASS 0 254 #define NETSEC_XDP_CONSUMED BIT(0) 255 #define NETSEC_XDP_TX BIT(1) 256 #define NETSEC_XDP_REDIR BIT(2) 257 258 enum ring_id { 259 NETSEC_RING_TX = 0, 260 NETSEC_RING_RX 261 }; 262 263 enum buf_type { 264 TYPE_NETSEC_SKB = 0, 265 TYPE_NETSEC_XDP_TX, 266 TYPE_NETSEC_XDP_NDO, 267 }; 268 269 struct netsec_desc { 270 union { 271 struct sk_buff *skb; 272 struct xdp_frame *xdpf; 273 }; 274 dma_addr_t dma_addr; 275 void *addr; 276 u16 len; 277 u8 buf_type; 278 }; 279 280 struct netsec_desc_ring { 281 dma_addr_t desc_dma; 282 struct netsec_desc *desc; 283 void *vaddr; 284 u16 head, tail; 285 u16 xdp_xmit; /* netsec_xdp_xmit packets */ 286 struct page_pool *page_pool; 287 struct xdp_rxq_info xdp_rxq; 288 spinlock_t lock; /* XDP tx queue locking */ 289 }; 290 291 struct netsec_priv { 292 struct netsec_desc_ring desc_ring[NETSEC_RING_MAX]; 293 struct ethtool_coalesce et_coalesce; 294 struct bpf_prog *xdp_prog; 295 spinlock_t reglock; /* protect reg access */ 296 struct napi_struct napi; 297 phy_interface_t phy_interface; 298 struct net_device *ndev; 299 struct device_node *phy_np; 300 struct phy_device *phydev; 301 struct mii_bus *mii_bus; 302 void __iomem *ioaddr; 303 void __iomem *eeprom_base; 304 struct device *dev; 305 struct clk *clk; 306 u32 msg_enable; 307 u32 freq; 308 u32 phy_addr; 309 bool rx_cksum_offload_flag; 310 }; 311 312 struct netsec_de { /* Netsec Descriptor layout */ 313 u32 attr; 314 u32 data_buf_addr_up; 315 u32 data_buf_addr_lw; 316 u32 buf_len_info; 317 }; 318 319 struct netsec_tx_pkt_ctrl { 320 u16 tcp_seg_len; 321 bool tcp_seg_offload_flag; 322 bool cksum_offload_flag; 323 }; 324 325 struct netsec_rx_pkt_info { 326 int rx_cksum_result; 327 int err_code; 328 bool err_flag; 329 }; 330 331 static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val) 332 { 333 writel(val, priv->ioaddr + reg_addr); 334 } 335 336 static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr) 337 { 338 return readl(priv->ioaddr + reg_addr); 339 } 340 341 /************* MDIO BUS OPS FOLLOW *************/ 342 343 #define TIMEOUT_SPINS_MAC 1000 344 #define TIMEOUT_SECONDARY_MS_MAC 100 345 346 static u32 netsec_clk_type(u32 freq) 347 { 348 if (freq < MHZ(35)) 349 return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ; 350 if (freq < MHZ(60)) 351 return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ; 352 if (freq < MHZ(100)) 353 return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ; 354 if (freq < MHZ(150)) 355 return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ; 356 if (freq < MHZ(250)) 357 return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ; 358 359 return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ; 360 } 361 362 static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask) 363 { 364 u32 timeout = TIMEOUT_SPINS_MAC; 365 366 while (--timeout && netsec_read(priv, addr) & mask) 367 cpu_relax(); 368 if (timeout) 369 return 0; 370 371 timeout = TIMEOUT_SECONDARY_MS_MAC; 372 while (--timeout && netsec_read(priv, addr) & mask) 373 usleep_range(1000, 2000); 374 375 if (timeout) 376 return 0; 377 378 netdev_WARN(priv->ndev, "%s: timeout\n", __func__); 379 380 return -ETIMEDOUT; 381 } 382 383 static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value) 384 { 385 netsec_write(priv, MAC_REG_DATA, value); 386 netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE); 387 return netsec_wait_while_busy(priv, 388 MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); 389 } 390 391 static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read) 392 { 393 int ret; 394 395 netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ); 396 ret = netsec_wait_while_busy(priv, 397 MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); 398 if (ret) 399 return ret; 400 401 *read = netsec_read(priv, MAC_REG_DATA); 402 403 return 0; 404 } 405 406 static int netsec_mac_wait_while_busy(struct netsec_priv *priv, 407 u32 addr, u32 mask) 408 { 409 u32 timeout = TIMEOUT_SPINS_MAC; 410 int ret, data; 411 412 do { 413 ret = netsec_mac_read(priv, addr, &data); 414 if (ret) 415 break; 416 cpu_relax(); 417 } while (--timeout && (data & mask)); 418 419 if (timeout) 420 return 0; 421 422 timeout = TIMEOUT_SECONDARY_MS_MAC; 423 do { 424 usleep_range(1000, 2000); 425 426 ret = netsec_mac_read(priv, addr, &data); 427 if (ret) 428 break; 429 cpu_relax(); 430 } while (--timeout && (data & mask)); 431 432 if (timeout && !ret) 433 return 0; 434 435 netdev_WARN(priv->ndev, "%s: timeout\n", __func__); 436 437 return -ETIMEDOUT; 438 } 439 440 static int netsec_mac_update_to_phy_state(struct netsec_priv *priv) 441 { 442 struct phy_device *phydev = priv->ndev->phydev; 443 u32 value = 0; 444 445 value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON : 446 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON; 447 448 if (phydev->speed != SPEED_1000) 449 value |= NETSEC_MCR_PS; 450 451 if (priv->phy_interface != PHY_INTERFACE_MODE_GMII && 452 phydev->speed == SPEED_100) 453 value |= NETSEC_GMAC_MCR_REG_FES; 454 455 value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE; 456 457 if (phy_interface_mode_is_rgmii(priv->phy_interface)) 458 value |= NETSEC_GMAC_MCR_REG_IBN; 459 460 if (netsec_mac_write(priv, GMAC_REG_MCR, value)) 461 return -ETIMEDOUT; 462 463 return 0; 464 } 465 466 static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr); 467 468 static int netsec_phy_write(struct mii_bus *bus, 469 int phy_addr, int reg, u16 val) 470 { 471 int status; 472 struct netsec_priv *priv = bus->priv; 473 474 if (netsec_mac_write(priv, GMAC_REG_GDR, val)) 475 return -ETIMEDOUT; 476 if (netsec_mac_write(priv, GMAC_REG_GAR, 477 phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | 478 reg << NETSEC_GMAC_GAR_REG_SHIFT_GR | 479 NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB | 480 (netsec_clk_type(priv->freq) << 481 GMAC_REG_SHIFT_CR_GAR))) 482 return -ETIMEDOUT; 483 484 status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, 485 NETSEC_GMAC_GAR_REG_GB); 486 487 /* Developerbox implements RTL8211E PHY and there is 488 * a compatibility problem with F_GMAC4. 489 * RTL8211E expects MDC clock must be kept toggling for several 490 * clock cycle with MDIO high before entering the IDLE state. 491 * To meet this requirement, netsec driver needs to issue dummy 492 * read(e.g. read PHYID1(offset 0x2) register) right after write. 493 */ 494 netsec_phy_read(bus, phy_addr, MII_PHYSID1); 495 496 return status; 497 } 498 499 static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr) 500 { 501 struct netsec_priv *priv = bus->priv; 502 u32 data; 503 int ret; 504 505 if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB | 506 phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | 507 reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR | 508 (netsec_clk_type(priv->freq) << 509 GMAC_REG_SHIFT_CR_GAR))) 510 return -ETIMEDOUT; 511 512 ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, 513 NETSEC_GMAC_GAR_REG_GB); 514 if (ret) 515 return ret; 516 517 ret = netsec_mac_read(priv, GMAC_REG_GDR, &data); 518 if (ret) 519 return ret; 520 521 return data; 522 } 523 524 /************* ETHTOOL_OPS FOLLOW *************/ 525 526 static void netsec_et_get_drvinfo(struct net_device *net_device, 527 struct ethtool_drvinfo *info) 528 { 529 strlcpy(info->driver, "netsec", sizeof(info->driver)); 530 strlcpy(info->bus_info, dev_name(net_device->dev.parent), 531 sizeof(info->bus_info)); 532 } 533 534 static int netsec_et_get_coalesce(struct net_device *net_device, 535 struct ethtool_coalesce *et_coalesce) 536 { 537 struct netsec_priv *priv = netdev_priv(net_device); 538 539 *et_coalesce = priv->et_coalesce; 540 541 return 0; 542 } 543 544 static int netsec_et_set_coalesce(struct net_device *net_device, 545 struct ethtool_coalesce *et_coalesce) 546 { 547 struct netsec_priv *priv = netdev_priv(net_device); 548 549 priv->et_coalesce = *et_coalesce; 550 551 if (priv->et_coalesce.tx_coalesce_usecs < 50) 552 priv->et_coalesce.tx_coalesce_usecs = 50; 553 if (priv->et_coalesce.tx_max_coalesced_frames < 1) 554 priv->et_coalesce.tx_max_coalesced_frames = 1; 555 556 netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT, 557 priv->et_coalesce.tx_max_coalesced_frames); 558 netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR, 559 priv->et_coalesce.tx_coalesce_usecs); 560 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE); 561 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP); 562 563 if (priv->et_coalesce.rx_coalesce_usecs < 50) 564 priv->et_coalesce.rx_coalesce_usecs = 50; 565 if (priv->et_coalesce.rx_max_coalesced_frames < 1) 566 priv->et_coalesce.rx_max_coalesced_frames = 1; 567 568 netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT, 569 priv->et_coalesce.rx_max_coalesced_frames); 570 netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR, 571 priv->et_coalesce.rx_coalesce_usecs); 572 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT); 573 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP); 574 575 return 0; 576 } 577 578 static u32 netsec_et_get_msglevel(struct net_device *dev) 579 { 580 struct netsec_priv *priv = netdev_priv(dev); 581 582 return priv->msg_enable; 583 } 584 585 static void netsec_et_set_msglevel(struct net_device *dev, u32 datum) 586 { 587 struct netsec_priv *priv = netdev_priv(dev); 588 589 priv->msg_enable = datum; 590 } 591 592 static const struct ethtool_ops netsec_ethtool_ops = { 593 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 594 ETHTOOL_COALESCE_MAX_FRAMES, 595 .get_drvinfo = netsec_et_get_drvinfo, 596 .get_link_ksettings = phy_ethtool_get_link_ksettings, 597 .set_link_ksettings = phy_ethtool_set_link_ksettings, 598 .get_link = ethtool_op_get_link, 599 .get_coalesce = netsec_et_get_coalesce, 600 .set_coalesce = netsec_et_set_coalesce, 601 .get_msglevel = netsec_et_get_msglevel, 602 .set_msglevel = netsec_et_set_msglevel, 603 }; 604 605 /************* NETDEV_OPS FOLLOW *************/ 606 607 608 static void netsec_set_rx_de(struct netsec_priv *priv, 609 struct netsec_desc_ring *dring, u16 idx, 610 const struct netsec_desc *desc) 611 { 612 struct netsec_de *de = dring->vaddr + DESC_SZ * idx; 613 u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | 614 (1 << NETSEC_RX_PKT_FS_FIELD) | 615 (1 << NETSEC_RX_PKT_LS_FIELD); 616 617 if (idx == DESC_NUM - 1) 618 attr |= (1 << NETSEC_RX_PKT_LD_FIELD); 619 620 de->data_buf_addr_up = upper_32_bits(desc->dma_addr); 621 de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); 622 de->buf_len_info = desc->len; 623 de->attr = attr; 624 dma_wmb(); 625 626 dring->desc[idx].dma_addr = desc->dma_addr; 627 dring->desc[idx].addr = desc->addr; 628 dring->desc[idx].len = desc->len; 629 } 630 631 static bool netsec_clean_tx_dring(struct netsec_priv *priv) 632 { 633 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; 634 struct xdp_frame_bulk bq; 635 struct netsec_de *entry; 636 int tail = dring->tail; 637 unsigned int bytes; 638 int cnt = 0; 639 640 spin_lock(&dring->lock); 641 642 bytes = 0; 643 xdp_frame_bulk_init(&bq); 644 entry = dring->vaddr + DESC_SZ * tail; 645 646 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 647 648 while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) && 649 cnt < DESC_NUM) { 650 struct netsec_desc *desc; 651 int eop; 652 653 desc = &dring->desc[tail]; 654 eop = (entry->attr >> NETSEC_TX_LAST) & 1; 655 dma_rmb(); 656 657 /* if buf_type is either TYPE_NETSEC_SKB or 658 * TYPE_NETSEC_XDP_NDO we mapped it 659 */ 660 if (desc->buf_type != TYPE_NETSEC_XDP_TX) 661 dma_unmap_single(priv->dev, desc->dma_addr, desc->len, 662 DMA_TO_DEVICE); 663 664 if (!eop) 665 goto next; 666 667 if (desc->buf_type == TYPE_NETSEC_SKB) { 668 bytes += desc->skb->len; 669 dev_kfree_skb(desc->skb); 670 } else { 671 bytes += desc->xdpf->len; 672 if (desc->buf_type == TYPE_NETSEC_XDP_TX) 673 xdp_return_frame_rx_napi(desc->xdpf); 674 else 675 xdp_return_frame_bulk(desc->xdpf, &bq); 676 } 677 next: 678 /* clean up so netsec_uninit_pkt_dring() won't free the skb 679 * again 680 */ 681 *desc = (struct netsec_desc){}; 682 683 /* entry->attr is not going to be accessed by the NIC until 684 * netsec_set_tx_de() is called. No need for a dma_wmb() here 685 */ 686 entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; 687 /* move tail ahead */ 688 dring->tail = (tail + 1) % DESC_NUM; 689 690 tail = dring->tail; 691 entry = dring->vaddr + DESC_SZ * tail; 692 cnt++; 693 } 694 xdp_flush_frame_bulk(&bq); 695 696 rcu_read_unlock(); 697 698 spin_unlock(&dring->lock); 699 700 if (!cnt) 701 return false; 702 703 /* reading the register clears the irq */ 704 netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); 705 706 priv->ndev->stats.tx_packets += cnt; 707 priv->ndev->stats.tx_bytes += bytes; 708 709 netdev_completed_queue(priv->ndev, cnt, bytes); 710 711 return true; 712 } 713 714 static void netsec_process_tx(struct netsec_priv *priv) 715 { 716 struct net_device *ndev = priv->ndev; 717 bool cleaned; 718 719 cleaned = netsec_clean_tx_dring(priv); 720 721 if (cleaned && netif_queue_stopped(ndev)) { 722 /* Make sure we update the value, anyone stopping the queue 723 * after this will read the proper consumer idx 724 */ 725 smp_wmb(); 726 netif_wake_queue(ndev); 727 } 728 } 729 730 static void *netsec_alloc_rx_data(struct netsec_priv *priv, 731 dma_addr_t *dma_handle, u16 *desc_len) 732 733 { 734 735 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; 736 struct page *page; 737 738 page = page_pool_dev_alloc_pages(dring->page_pool); 739 if (!page) 740 return NULL; 741 742 /* We allocate the same buffer length for XDP and non-XDP cases. 743 * page_pool API will map the whole page, skip what's needed for 744 * network payloads and/or XDP 745 */ 746 *dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM; 747 /* Make sure the incoming payload fits in the page for XDP and non-XDP 748 * cases and reserve enough space for headroom + skb_shared_info 749 */ 750 *desc_len = NETSEC_RX_BUF_SIZE; 751 752 return page_address(page); 753 } 754 755 static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num) 756 { 757 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; 758 u16 idx = from; 759 760 while (num) { 761 netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]); 762 idx++; 763 if (idx >= DESC_NUM) 764 idx = 0; 765 num--; 766 } 767 } 768 769 static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts) 770 { 771 if (likely(pkts)) 772 netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts); 773 } 774 775 static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res, 776 u16 pkts) 777 { 778 if (xdp_res & NETSEC_XDP_REDIR) 779 xdp_do_flush_map(); 780 781 if (xdp_res & NETSEC_XDP_TX) 782 netsec_xdp_ring_tx_db(priv, pkts); 783 } 784 785 static void netsec_set_tx_de(struct netsec_priv *priv, 786 struct netsec_desc_ring *dring, 787 const struct netsec_tx_pkt_ctrl *tx_ctrl, 788 const struct netsec_desc *desc, void *buf) 789 { 790 int idx = dring->head; 791 struct netsec_de *de; 792 u32 attr; 793 794 de = dring->vaddr + (DESC_SZ * idx); 795 796 attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) | 797 (1 << NETSEC_TX_SHIFT_PT_FIELD) | 798 (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) | 799 (1 << NETSEC_TX_SHIFT_FS_FIELD) | 800 (1 << NETSEC_TX_LAST) | 801 (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) | 802 (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) | 803 (1 << NETSEC_TX_SHIFT_TRS_FIELD); 804 if (idx == DESC_NUM - 1) 805 attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD); 806 807 de->data_buf_addr_up = upper_32_bits(desc->dma_addr); 808 de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); 809 de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len; 810 de->attr = attr; 811 812 dring->desc[idx] = *desc; 813 if (desc->buf_type == TYPE_NETSEC_SKB) 814 dring->desc[idx].skb = buf; 815 else if (desc->buf_type == TYPE_NETSEC_XDP_TX || 816 desc->buf_type == TYPE_NETSEC_XDP_NDO) 817 dring->desc[idx].xdpf = buf; 818 819 /* move head ahead */ 820 dring->head = (dring->head + 1) % DESC_NUM; 821 } 822 823 /* The current driver only supports 1 Txq, this should run under spin_lock() */ 824 static u32 netsec_xdp_queue_one(struct netsec_priv *priv, 825 struct xdp_frame *xdpf, bool is_ndo) 826 827 { 828 struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; 829 struct page *page = virt_to_page(xdpf->data); 830 struct netsec_tx_pkt_ctrl tx_ctrl = {}; 831 struct netsec_desc tx_desc; 832 dma_addr_t dma_handle; 833 u16 filled; 834 835 if (tx_ring->head >= tx_ring->tail) 836 filled = tx_ring->head - tx_ring->tail; 837 else 838 filled = tx_ring->head + DESC_NUM - tx_ring->tail; 839 840 if (DESC_NUM - filled <= 1) 841 return NETSEC_XDP_CONSUMED; 842 843 if (is_ndo) { 844 /* this is for ndo_xdp_xmit, the buffer needs mapping before 845 * sending 846 */ 847 dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len, 848 DMA_TO_DEVICE); 849 if (dma_mapping_error(priv->dev, dma_handle)) 850 return NETSEC_XDP_CONSUMED; 851 tx_desc.buf_type = TYPE_NETSEC_XDP_NDO; 852 } else { 853 /* This is the device Rx buffer from page_pool. No need to remap 854 * just sync and send it 855 */ 856 struct netsec_desc_ring *rx_ring = 857 &priv->desc_ring[NETSEC_RING_RX]; 858 enum dma_data_direction dma_dir = 859 page_pool_get_dma_dir(rx_ring->page_pool); 860 861 dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom + 862 sizeof(*xdpf); 863 dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len, 864 dma_dir); 865 tx_desc.buf_type = TYPE_NETSEC_XDP_TX; 866 } 867 868 tx_desc.dma_addr = dma_handle; 869 tx_desc.addr = xdpf->data; 870 tx_desc.len = xdpf->len; 871 872 netdev_sent_queue(priv->ndev, xdpf->len); 873 netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf); 874 875 return NETSEC_XDP_TX; 876 } 877 878 static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp) 879 { 880 struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; 881 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 882 u32 ret; 883 884 if (unlikely(!xdpf)) 885 return NETSEC_XDP_CONSUMED; 886 887 spin_lock(&tx_ring->lock); 888 ret = netsec_xdp_queue_one(priv, xdpf, false); 889 spin_unlock(&tx_ring->lock); 890 891 return ret; 892 } 893 894 static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, 895 struct xdp_buff *xdp) 896 { 897 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; 898 unsigned int sync, len = xdp->data_end - xdp->data; 899 u32 ret = NETSEC_XDP_PASS; 900 struct page *page; 901 int err; 902 u32 act; 903 904 act = bpf_prog_run_xdp(prog, xdp); 905 906 /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ 907 sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM; 908 sync = max(sync, len); 909 910 switch (act) { 911 case XDP_PASS: 912 ret = NETSEC_XDP_PASS; 913 break; 914 case XDP_TX: 915 ret = netsec_xdp_xmit_back(priv, xdp); 916 if (ret != NETSEC_XDP_TX) { 917 page = virt_to_head_page(xdp->data); 918 page_pool_put_page(dring->page_pool, page, sync, true); 919 } 920 break; 921 case XDP_REDIRECT: 922 err = xdp_do_redirect(priv->ndev, xdp, prog); 923 if (!err) { 924 ret = NETSEC_XDP_REDIR; 925 } else { 926 ret = NETSEC_XDP_CONSUMED; 927 page = virt_to_head_page(xdp->data); 928 page_pool_put_page(dring->page_pool, page, sync, true); 929 } 930 break; 931 default: 932 bpf_warn_invalid_xdp_action(act); 933 fallthrough; 934 case XDP_ABORTED: 935 trace_xdp_exception(priv->ndev, prog, act); 936 fallthrough; /* handle aborts by dropping packet */ 937 case XDP_DROP: 938 ret = NETSEC_XDP_CONSUMED; 939 page = virt_to_head_page(xdp->data); 940 page_pool_put_page(dring->page_pool, page, sync, true); 941 break; 942 } 943 944 return ret; 945 } 946 947 static int netsec_process_rx(struct netsec_priv *priv, int budget) 948 { 949 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; 950 struct net_device *ndev = priv->ndev; 951 struct netsec_rx_pkt_info rx_info; 952 enum dma_data_direction dma_dir; 953 struct bpf_prog *xdp_prog; 954 struct xdp_buff xdp; 955 u16 xdp_xmit = 0; 956 u32 xdp_act = 0; 957 int done = 0; 958 959 xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq); 960 961 xdp_prog = READ_ONCE(priv->xdp_prog); 962 dma_dir = page_pool_get_dma_dir(dring->page_pool); 963 964 while (done < budget) { 965 u16 idx = dring->tail; 966 struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); 967 struct netsec_desc *desc = &dring->desc[idx]; 968 struct page *page = virt_to_page(desc->addr); 969 u32 xdp_result = NETSEC_XDP_PASS; 970 struct sk_buff *skb = NULL; 971 u16 pkt_len, desc_len; 972 dma_addr_t dma_handle; 973 void *buf_addr; 974 975 if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { 976 /* reading the register clears the irq */ 977 netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); 978 break; 979 } 980 981 /* This barrier is needed to keep us from reading 982 * any other fields out of the netsec_de until we have 983 * verified the descriptor has been written back 984 */ 985 dma_rmb(); 986 done++; 987 988 pkt_len = de->buf_len_info >> 16; 989 rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) & 990 NETSEC_RX_PKT_ERR_MASK; 991 rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1; 992 if (rx_info.err_flag) { 993 netif_err(priv, drv, priv->ndev, 994 "%s: rx fail err(%d)\n", __func__, 995 rx_info.err_code); 996 ndev->stats.rx_dropped++; 997 dring->tail = (dring->tail + 1) % DESC_NUM; 998 /* reuse buffer page frag */ 999 netsec_rx_fill(priv, idx, 1); 1000 continue; 1001 } 1002 rx_info.rx_cksum_result = 1003 (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3; 1004 1005 /* allocate a fresh buffer and map it to the hardware. 1006 * This will eventually replace the old buffer in the hardware 1007 */ 1008 buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); 1009 1010 if (unlikely(!buf_addr)) 1011 break; 1012 1013 dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len, 1014 dma_dir); 1015 prefetch(desc->addr); 1016 1017 xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM, 1018 pkt_len, false); 1019 1020 if (xdp_prog) { 1021 xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp); 1022 if (xdp_result != NETSEC_XDP_PASS) { 1023 xdp_act |= xdp_result; 1024 if (xdp_result == NETSEC_XDP_TX) 1025 xdp_xmit++; 1026 goto next; 1027 } 1028 } 1029 skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA); 1030 1031 if (unlikely(!skb)) { 1032 /* If skb fails recycle_direct will either unmap and 1033 * free the page or refill the cache depending on the 1034 * cache state. Since we paid the allocation cost if 1035 * building an skb fails try to put the page into cache 1036 */ 1037 page_pool_put_page(dring->page_pool, page, pkt_len, 1038 true); 1039 netif_err(priv, drv, priv->ndev, 1040 "rx failed to build skb\n"); 1041 break; 1042 } 1043 page_pool_release_page(dring->page_pool, page); 1044 1045 skb_reserve(skb, xdp.data - xdp.data_hard_start); 1046 skb_put(skb, xdp.data_end - xdp.data); 1047 skb->protocol = eth_type_trans(skb, priv->ndev); 1048 1049 if (priv->rx_cksum_offload_flag && 1050 rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK) 1051 skb->ip_summed = CHECKSUM_UNNECESSARY; 1052 1053 next: 1054 if (skb) 1055 napi_gro_receive(&priv->napi, skb); 1056 if (skb || xdp_result) { 1057 ndev->stats.rx_packets++; 1058 ndev->stats.rx_bytes += xdp.data_end - xdp.data; 1059 } 1060 1061 /* Update the descriptor with fresh buffers */ 1062 desc->len = desc_len; 1063 desc->dma_addr = dma_handle; 1064 desc->addr = buf_addr; 1065 1066 netsec_rx_fill(priv, idx, 1); 1067 dring->tail = (dring->tail + 1) % DESC_NUM; 1068 } 1069 netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit); 1070 1071 return done; 1072 } 1073 1074 static int netsec_napi_poll(struct napi_struct *napi, int budget) 1075 { 1076 struct netsec_priv *priv; 1077 int done; 1078 1079 priv = container_of(napi, struct netsec_priv, napi); 1080 1081 netsec_process_tx(priv); 1082 done = netsec_process_rx(priv, budget); 1083 1084 if (done < budget && napi_complete_done(napi, done)) { 1085 unsigned long flags; 1086 1087 spin_lock_irqsave(&priv->reglock, flags); 1088 netsec_write(priv, NETSEC_REG_INTEN_SET, 1089 NETSEC_IRQ_RX | NETSEC_IRQ_TX); 1090 spin_unlock_irqrestore(&priv->reglock, flags); 1091 } 1092 1093 return done; 1094 } 1095 1096 1097 static int netsec_desc_used(struct netsec_desc_ring *dring) 1098 { 1099 int used; 1100 1101 if (dring->head >= dring->tail) 1102 used = dring->head - dring->tail; 1103 else 1104 used = dring->head + DESC_NUM - dring->tail; 1105 1106 return used; 1107 } 1108 1109 static int netsec_check_stop_tx(struct netsec_priv *priv, int used) 1110 { 1111 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; 1112 1113 /* keep tail from touching the queue */ 1114 if (DESC_NUM - used < 2) { 1115 netif_stop_queue(priv->ndev); 1116 1117 /* Make sure we read the updated value in case 1118 * descriptors got freed 1119 */ 1120 smp_rmb(); 1121 1122 used = netsec_desc_used(dring); 1123 if (DESC_NUM - used < 2) 1124 return NETDEV_TX_BUSY; 1125 1126 netif_wake_queue(priv->ndev); 1127 } 1128 1129 return 0; 1130 } 1131 1132 static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, 1133 struct net_device *ndev) 1134 { 1135 struct netsec_priv *priv = netdev_priv(ndev); 1136 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; 1137 struct netsec_tx_pkt_ctrl tx_ctrl = {}; 1138 struct netsec_desc tx_desc; 1139 u16 tso_seg_len = 0; 1140 int filled; 1141 1142 spin_lock_bh(&dring->lock); 1143 filled = netsec_desc_used(dring); 1144 if (netsec_check_stop_tx(priv, filled)) { 1145 spin_unlock_bh(&dring->lock); 1146 net_warn_ratelimited("%s %s Tx queue full\n", 1147 dev_name(priv->dev), ndev->name); 1148 return NETDEV_TX_BUSY; 1149 } 1150 1151 if (skb->ip_summed == CHECKSUM_PARTIAL) 1152 tx_ctrl.cksum_offload_flag = true; 1153 1154 if (skb_is_gso(skb)) 1155 tso_seg_len = skb_shinfo(skb)->gso_size; 1156 1157 if (tso_seg_len > 0) { 1158 if (skb->protocol == htons(ETH_P_IP)) { 1159 ip_hdr(skb)->tot_len = 0; 1160 tcp_hdr(skb)->check = 1161 ~tcp_v4_check(0, ip_hdr(skb)->saddr, 1162 ip_hdr(skb)->daddr, 0); 1163 } else { 1164 tcp_v6_gso_csum_prep(skb); 1165 } 1166 1167 tx_ctrl.tcp_seg_offload_flag = true; 1168 tx_ctrl.tcp_seg_len = tso_seg_len; 1169 } 1170 1171 tx_desc.dma_addr = dma_map_single(priv->dev, skb->data, 1172 skb_headlen(skb), DMA_TO_DEVICE); 1173 if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) { 1174 spin_unlock_bh(&dring->lock); 1175 netif_err(priv, drv, priv->ndev, 1176 "%s: DMA mapping failed\n", __func__); 1177 ndev->stats.tx_dropped++; 1178 dev_kfree_skb_any(skb); 1179 return NETDEV_TX_OK; 1180 } 1181 tx_desc.addr = skb->data; 1182 tx_desc.len = skb_headlen(skb); 1183 tx_desc.buf_type = TYPE_NETSEC_SKB; 1184 1185 skb_tx_timestamp(skb); 1186 netdev_sent_queue(priv->ndev, skb->len); 1187 1188 netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb); 1189 spin_unlock_bh(&dring->lock); 1190 netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */ 1191 1192 return NETDEV_TX_OK; 1193 } 1194 1195 static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) 1196 { 1197 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1198 struct netsec_desc *desc; 1199 u16 idx; 1200 1201 if (!dring->vaddr || !dring->desc) 1202 return; 1203 for (idx = 0; idx < DESC_NUM; idx++) { 1204 desc = &dring->desc[idx]; 1205 if (!desc->addr) 1206 continue; 1207 1208 if (id == NETSEC_RING_RX) { 1209 struct page *page = virt_to_page(desc->addr); 1210 1211 page_pool_put_full_page(dring->page_pool, page, false); 1212 } else if (id == NETSEC_RING_TX) { 1213 dma_unmap_single(priv->dev, desc->dma_addr, desc->len, 1214 DMA_TO_DEVICE); 1215 dev_kfree_skb(desc->skb); 1216 } 1217 } 1218 1219 /* Rx is currently using page_pool */ 1220 if (id == NETSEC_RING_RX) { 1221 if (xdp_rxq_info_is_reg(&dring->xdp_rxq)) 1222 xdp_rxq_info_unreg(&dring->xdp_rxq); 1223 page_pool_destroy(dring->page_pool); 1224 } 1225 1226 memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); 1227 memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); 1228 1229 dring->head = 0; 1230 dring->tail = 0; 1231 1232 if (id == NETSEC_RING_TX) 1233 netdev_reset_queue(priv->ndev); 1234 } 1235 1236 static void netsec_free_dring(struct netsec_priv *priv, int id) 1237 { 1238 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1239 1240 if (dring->vaddr) { 1241 dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM, 1242 dring->vaddr, dring->desc_dma); 1243 dring->vaddr = NULL; 1244 } 1245 1246 kfree(dring->desc); 1247 dring->desc = NULL; 1248 } 1249 1250 static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) 1251 { 1252 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1253 1254 dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1255 &dring->desc_dma, GFP_KERNEL); 1256 if (!dring->vaddr) 1257 goto err; 1258 1259 dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL); 1260 if (!dring->desc) 1261 goto err; 1262 1263 return 0; 1264 err: 1265 netsec_free_dring(priv, id); 1266 1267 return -ENOMEM; 1268 } 1269 1270 static void netsec_setup_tx_dring(struct netsec_priv *priv) 1271 { 1272 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; 1273 int i; 1274 1275 for (i = 0; i < DESC_NUM; i++) { 1276 struct netsec_de *de; 1277 1278 de = dring->vaddr + (DESC_SZ * i); 1279 /* de->attr is not going to be accessed by the NIC 1280 * until netsec_set_tx_de() is called. 1281 * No need for a dma_wmb() here 1282 */ 1283 de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; 1284 } 1285 } 1286 1287 static int netsec_setup_rx_dring(struct netsec_priv *priv) 1288 { 1289 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; 1290 struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog); 1291 struct page_pool_params pp_params = { 1292 .order = 0, 1293 /* internal DMA mapping in page_pool */ 1294 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 1295 .pool_size = DESC_NUM, 1296 .nid = NUMA_NO_NODE, 1297 .dev = priv->dev, 1298 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 1299 .offset = NETSEC_RXBUF_HEADROOM, 1300 .max_len = NETSEC_RX_BUF_SIZE, 1301 }; 1302 int i, err; 1303 1304 dring->page_pool = page_pool_create(&pp_params); 1305 if (IS_ERR(dring->page_pool)) { 1306 err = PTR_ERR(dring->page_pool); 1307 dring->page_pool = NULL; 1308 goto err_out; 1309 } 1310 1311 err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id); 1312 if (err) 1313 goto err_out; 1314 1315 err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL, 1316 dring->page_pool); 1317 if (err) 1318 goto err_out; 1319 1320 for (i = 0; i < DESC_NUM; i++) { 1321 struct netsec_desc *desc = &dring->desc[i]; 1322 dma_addr_t dma_handle; 1323 void *buf; 1324 u16 len; 1325 1326 buf = netsec_alloc_rx_data(priv, &dma_handle, &len); 1327 1328 if (!buf) { 1329 err = -ENOMEM; 1330 goto err_out; 1331 } 1332 desc->dma_addr = dma_handle; 1333 desc->addr = buf; 1334 desc->len = len; 1335 } 1336 1337 netsec_rx_fill(priv, 0, DESC_NUM); 1338 1339 return 0; 1340 1341 err_out: 1342 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); 1343 return err; 1344 } 1345 1346 static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, 1347 u32 addr_h, u32 addr_l, u32 size) 1348 { 1349 u64 base = (u64)addr_h << 32 | addr_l; 1350 void __iomem *ucode; 1351 u32 i; 1352 1353 ucode = ioremap(base, size * sizeof(u32)); 1354 if (!ucode) 1355 return -ENOMEM; 1356 1357 for (i = 0; i < size; i++) 1358 netsec_write(priv, reg, readl(ucode + i * 4)); 1359 1360 iounmap(ucode); 1361 return 0; 1362 } 1363 1364 static int netsec_netdev_load_microcode(struct netsec_priv *priv) 1365 { 1366 u32 addr_h, addr_l, size; 1367 int err; 1368 1369 addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H); 1370 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L); 1371 size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE); 1372 err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF, 1373 addr_h, addr_l, size); 1374 if (err) 1375 return err; 1376 1377 addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H); 1378 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L); 1379 size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE); 1380 err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF, 1381 addr_h, addr_l, size); 1382 if (err) 1383 return err; 1384 1385 addr_h = 0; 1386 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS); 1387 size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE); 1388 err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF, 1389 addr_h, addr_l, size); 1390 if (err) 1391 return err; 1392 1393 return 0; 1394 } 1395 1396 static int netsec_reset_hardware(struct netsec_priv *priv, 1397 bool load_ucode) 1398 { 1399 u32 value; 1400 int err; 1401 1402 /* stop DMA engines */ 1403 if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) { 1404 netsec_write(priv, NETSEC_REG_DMA_HM_CTRL, 1405 NETSEC_DMA_CTRL_REG_STOP); 1406 netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, 1407 NETSEC_DMA_CTRL_REG_STOP); 1408 1409 while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) & 1410 NETSEC_DMA_CTRL_REG_STOP) 1411 cpu_relax(); 1412 1413 while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) & 1414 NETSEC_DMA_CTRL_REG_STOP) 1415 cpu_relax(); 1416 } 1417 1418 netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET); 1419 netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN); 1420 netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL); 1421 1422 while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0) 1423 cpu_relax(); 1424 1425 /* set desc_start addr */ 1426 netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP, 1427 upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); 1428 netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW, 1429 lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); 1430 1431 netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP, 1432 upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); 1433 netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW, 1434 lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); 1435 1436 /* set normal tx dring ring config */ 1437 netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG, 1438 1 << NETSEC_REG_DESC_ENDIAN); 1439 netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, 1440 1 << NETSEC_REG_DESC_ENDIAN); 1441 1442 if (load_ucode) { 1443 err = netsec_netdev_load_microcode(priv); 1444 if (err) { 1445 netif_err(priv, probe, priv->ndev, 1446 "%s: failed to load microcode (%d)\n", 1447 __func__, err); 1448 return err; 1449 } 1450 } 1451 1452 /* start DMA engines */ 1453 netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1); 1454 netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0); 1455 1456 usleep_range(1000, 2000); 1457 1458 if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) & 1459 NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) { 1460 netif_err(priv, probe, priv->ndev, 1461 "microengine start failed\n"); 1462 return -ENXIO; 1463 } 1464 netsec_write(priv, NETSEC_REG_TOP_STATUS, 1465 NETSEC_TOP_IRQ_REG_CODE_LOAD_END); 1466 1467 value = NETSEC_PKT_CTRL_REG_MODE_NRM; 1468 if (priv->ndev->mtu > ETH_DATA_LEN) 1469 value |= NETSEC_PKT_CTRL_REG_EN_JUMBO; 1470 1471 /* change to normal mode */ 1472 netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS); 1473 netsec_write(priv, NETSEC_REG_PKT_CTRL, value); 1474 1475 while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) & 1476 NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0) 1477 cpu_relax(); 1478 1479 /* clear any pending EMPTY/ERR irq status */ 1480 netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0); 1481 1482 /* Disable TX & RX intr */ 1483 netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); 1484 1485 return 0; 1486 } 1487 1488 static int netsec_start_gmac(struct netsec_priv *priv) 1489 { 1490 struct phy_device *phydev = priv->ndev->phydev; 1491 u32 value = 0; 1492 int ret; 1493 1494 if (phydev->speed != SPEED_1000) 1495 value = (NETSEC_GMAC_MCR_REG_CST | 1496 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON); 1497 1498 if (netsec_mac_write(priv, GMAC_REG_MCR, value)) 1499 return -ETIMEDOUT; 1500 if (netsec_mac_write(priv, GMAC_REG_BMR, 1501 NETSEC_GMAC_BMR_REG_RESET)) 1502 return -ETIMEDOUT; 1503 1504 /* Wait soft reset */ 1505 usleep_range(1000, 5000); 1506 1507 ret = netsec_mac_read(priv, GMAC_REG_BMR, &value); 1508 if (ret) 1509 return ret; 1510 if (value & NETSEC_GMAC_BMR_REG_SWR) 1511 return -EAGAIN; 1512 1513 netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1); 1514 if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1)) 1515 return -ETIMEDOUT; 1516 1517 netsec_write(priv, MAC_REG_DESC_INIT, 1); 1518 if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1)) 1519 return -ETIMEDOUT; 1520 1521 if (netsec_mac_write(priv, GMAC_REG_BMR, 1522 NETSEC_GMAC_BMR_REG_COMMON)) 1523 return -ETIMEDOUT; 1524 if (netsec_mac_write(priv, GMAC_REG_RDLAR, 1525 NETSEC_GMAC_RDLAR_REG_COMMON)) 1526 return -ETIMEDOUT; 1527 if (netsec_mac_write(priv, GMAC_REG_TDLAR, 1528 NETSEC_GMAC_TDLAR_REG_COMMON)) 1529 return -ETIMEDOUT; 1530 if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001)) 1531 return -ETIMEDOUT; 1532 1533 ret = netsec_mac_update_to_phy_state(priv); 1534 if (ret) 1535 return ret; 1536 1537 ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); 1538 if (ret) 1539 return ret; 1540 1541 value |= NETSEC_GMAC_OMR_REG_SR; 1542 value |= NETSEC_GMAC_OMR_REG_ST; 1543 1544 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); 1545 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); 1546 1547 netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce); 1548 1549 if (netsec_mac_write(priv, GMAC_REG_OMR, value)) 1550 return -ETIMEDOUT; 1551 1552 return 0; 1553 } 1554 1555 static int netsec_stop_gmac(struct netsec_priv *priv) 1556 { 1557 u32 value; 1558 int ret; 1559 1560 ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); 1561 if (ret) 1562 return ret; 1563 value &= ~NETSEC_GMAC_OMR_REG_SR; 1564 value &= ~NETSEC_GMAC_OMR_REG_ST; 1565 1566 /* disable all interrupts */ 1567 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); 1568 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); 1569 1570 return netsec_mac_write(priv, GMAC_REG_OMR, value); 1571 } 1572 1573 static void netsec_phy_adjust_link(struct net_device *ndev) 1574 { 1575 struct netsec_priv *priv = netdev_priv(ndev); 1576 1577 if (ndev->phydev->link) 1578 netsec_start_gmac(priv); 1579 else 1580 netsec_stop_gmac(priv); 1581 1582 phy_print_status(ndev->phydev); 1583 } 1584 1585 static irqreturn_t netsec_irq_handler(int irq, void *dev_id) 1586 { 1587 struct netsec_priv *priv = dev_id; 1588 u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS); 1589 unsigned long flags; 1590 1591 /* Disable interrupts */ 1592 if (status & NETSEC_IRQ_TX) { 1593 val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS); 1594 netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val); 1595 } 1596 if (status & NETSEC_IRQ_RX) { 1597 val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS); 1598 netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val); 1599 } 1600 1601 spin_lock_irqsave(&priv->reglock, flags); 1602 netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX); 1603 spin_unlock_irqrestore(&priv->reglock, flags); 1604 1605 napi_schedule(&priv->napi); 1606 1607 return IRQ_HANDLED; 1608 } 1609 1610 static int netsec_netdev_open(struct net_device *ndev) 1611 { 1612 struct netsec_priv *priv = netdev_priv(ndev); 1613 int ret; 1614 1615 pm_runtime_get_sync(priv->dev); 1616 1617 netsec_setup_tx_dring(priv); 1618 ret = netsec_setup_rx_dring(priv); 1619 if (ret) { 1620 netif_err(priv, probe, priv->ndev, 1621 "%s: fail setup ring\n", __func__); 1622 goto err1; 1623 } 1624 1625 ret = request_irq(priv->ndev->irq, netsec_irq_handler, 1626 IRQF_SHARED, "netsec", priv); 1627 if (ret) { 1628 netif_err(priv, drv, priv->ndev, "request_irq failed\n"); 1629 goto err2; 1630 } 1631 1632 if (dev_of_node(priv->dev)) { 1633 if (!of_phy_connect(priv->ndev, priv->phy_np, 1634 netsec_phy_adjust_link, 0, 1635 priv->phy_interface)) { 1636 netif_err(priv, link, priv->ndev, "missing PHY\n"); 1637 ret = -ENODEV; 1638 goto err3; 1639 } 1640 } else { 1641 ret = phy_connect_direct(priv->ndev, priv->phydev, 1642 netsec_phy_adjust_link, 1643 priv->phy_interface); 1644 if (ret) { 1645 netif_err(priv, link, priv->ndev, 1646 "phy_connect_direct() failed (%d)\n", ret); 1647 goto err3; 1648 } 1649 } 1650 1651 phy_start(ndev->phydev); 1652 1653 netsec_start_gmac(priv); 1654 napi_enable(&priv->napi); 1655 netif_start_queue(ndev); 1656 1657 /* Enable TX+RX intr. */ 1658 netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX); 1659 1660 return 0; 1661 err3: 1662 free_irq(priv->ndev->irq, priv); 1663 err2: 1664 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); 1665 err1: 1666 pm_runtime_put_sync(priv->dev); 1667 return ret; 1668 } 1669 1670 static int netsec_netdev_stop(struct net_device *ndev) 1671 { 1672 int ret; 1673 struct netsec_priv *priv = netdev_priv(ndev); 1674 1675 netif_stop_queue(priv->ndev); 1676 dma_wmb(); 1677 1678 napi_disable(&priv->napi); 1679 1680 netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); 1681 netsec_stop_gmac(priv); 1682 1683 free_irq(priv->ndev->irq, priv); 1684 1685 netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); 1686 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); 1687 1688 phy_stop(ndev->phydev); 1689 phy_disconnect(ndev->phydev); 1690 1691 ret = netsec_reset_hardware(priv, false); 1692 1693 pm_runtime_put_sync(priv->dev); 1694 1695 return ret; 1696 } 1697 1698 static int netsec_netdev_init(struct net_device *ndev) 1699 { 1700 struct netsec_priv *priv = netdev_priv(ndev); 1701 int ret; 1702 u16 data; 1703 1704 BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM); 1705 1706 ret = netsec_alloc_dring(priv, NETSEC_RING_TX); 1707 if (ret) 1708 return ret; 1709 1710 ret = netsec_alloc_dring(priv, NETSEC_RING_RX); 1711 if (ret) 1712 goto err1; 1713 1714 /* set phy power down */ 1715 data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); 1716 netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, 1717 data | BMCR_PDOWN); 1718 1719 ret = netsec_reset_hardware(priv, true); 1720 if (ret) 1721 goto err2; 1722 1723 /* Restore phy power state */ 1724 netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); 1725 1726 spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); 1727 spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); 1728 1729 return 0; 1730 err2: 1731 netsec_free_dring(priv, NETSEC_RING_RX); 1732 err1: 1733 netsec_free_dring(priv, NETSEC_RING_TX); 1734 return ret; 1735 } 1736 1737 static void netsec_netdev_uninit(struct net_device *ndev) 1738 { 1739 struct netsec_priv *priv = netdev_priv(ndev); 1740 1741 netsec_free_dring(priv, NETSEC_RING_RX); 1742 netsec_free_dring(priv, NETSEC_RING_TX); 1743 } 1744 1745 static int netsec_netdev_set_features(struct net_device *ndev, 1746 netdev_features_t features) 1747 { 1748 struct netsec_priv *priv = netdev_priv(ndev); 1749 1750 priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM); 1751 1752 return 0; 1753 } 1754 1755 static int netsec_xdp_xmit(struct net_device *ndev, int n, 1756 struct xdp_frame **frames, u32 flags) 1757 { 1758 struct netsec_priv *priv = netdev_priv(ndev); 1759 struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; 1760 int i, nxmit = 0; 1761 1762 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 1763 return -EINVAL; 1764 1765 spin_lock(&tx_ring->lock); 1766 for (i = 0; i < n; i++) { 1767 struct xdp_frame *xdpf = frames[i]; 1768 int err; 1769 1770 err = netsec_xdp_queue_one(priv, xdpf, true); 1771 if (err != NETSEC_XDP_TX) 1772 break; 1773 1774 tx_ring->xdp_xmit++; 1775 nxmit++; 1776 } 1777 spin_unlock(&tx_ring->lock); 1778 1779 if (unlikely(flags & XDP_XMIT_FLUSH)) { 1780 netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit); 1781 tx_ring->xdp_xmit = 0; 1782 } 1783 1784 return nxmit; 1785 } 1786 1787 static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog, 1788 struct netlink_ext_ack *extack) 1789 { 1790 struct net_device *dev = priv->ndev; 1791 struct bpf_prog *old_prog; 1792 1793 /* For now just support only the usual MTU sized frames */ 1794 if (prog && dev->mtu > 1500) { 1795 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); 1796 return -EOPNOTSUPP; 1797 } 1798 1799 if (netif_running(dev)) 1800 netsec_netdev_stop(dev); 1801 1802 /* Detach old prog, if any */ 1803 old_prog = xchg(&priv->xdp_prog, prog); 1804 if (old_prog) 1805 bpf_prog_put(old_prog); 1806 1807 if (netif_running(dev)) 1808 netsec_netdev_open(dev); 1809 1810 return 0; 1811 } 1812 1813 static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp) 1814 { 1815 struct netsec_priv *priv = netdev_priv(ndev); 1816 1817 switch (xdp->command) { 1818 case XDP_SETUP_PROG: 1819 return netsec_xdp_setup(priv, xdp->prog, xdp->extack); 1820 default: 1821 return -EINVAL; 1822 } 1823 } 1824 1825 static const struct net_device_ops netsec_netdev_ops = { 1826 .ndo_init = netsec_netdev_init, 1827 .ndo_uninit = netsec_netdev_uninit, 1828 .ndo_open = netsec_netdev_open, 1829 .ndo_stop = netsec_netdev_stop, 1830 .ndo_start_xmit = netsec_netdev_start_xmit, 1831 .ndo_set_features = netsec_netdev_set_features, 1832 .ndo_set_mac_address = eth_mac_addr, 1833 .ndo_validate_addr = eth_validate_addr, 1834 .ndo_do_ioctl = phy_do_ioctl, 1835 .ndo_xdp_xmit = netsec_xdp_xmit, 1836 .ndo_bpf = netsec_xdp, 1837 }; 1838 1839 static int netsec_of_probe(struct platform_device *pdev, 1840 struct netsec_priv *priv, u32 *phy_addr) 1841 { 1842 int err; 1843 1844 err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface); 1845 if (err) { 1846 dev_err(&pdev->dev, "missing required property 'phy-mode'\n"); 1847 return err; 1848 } 1849 1850 priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1851 if (!priv->phy_np) { 1852 dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); 1853 return -EINVAL; 1854 } 1855 1856 *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); 1857 1858 priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ 1859 if (IS_ERR(priv->clk)) { 1860 dev_err(&pdev->dev, "phy_ref_clk not found\n"); 1861 return PTR_ERR(priv->clk); 1862 } 1863 priv->freq = clk_get_rate(priv->clk); 1864 1865 return 0; 1866 } 1867 1868 static int netsec_acpi_probe(struct platform_device *pdev, 1869 struct netsec_priv *priv, u32 *phy_addr) 1870 { 1871 int ret; 1872 1873 if (!IS_ENABLED(CONFIG_ACPI)) 1874 return -ENODEV; 1875 1876 /* ACPI systems are assumed to configure the PHY in firmware, so 1877 * there is really no need to discover the PHY mode from the DSDT. 1878 * Since firmware is known to exist in the field that configures the 1879 * PHY correctly but passes the wrong mode string in the phy-mode 1880 * device property, we have no choice but to ignore it. 1881 */ 1882 priv->phy_interface = PHY_INTERFACE_MODE_NA; 1883 1884 ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); 1885 if (ret) { 1886 dev_err(&pdev->dev, 1887 "missing required property 'phy-channel'\n"); 1888 return ret; 1889 } 1890 1891 ret = device_property_read_u32(&pdev->dev, 1892 "socionext,phy-clock-frequency", 1893 &priv->freq); 1894 if (ret) 1895 dev_err(&pdev->dev, 1896 "missing required property 'socionext,phy-clock-frequency'\n"); 1897 return ret; 1898 } 1899 1900 static void netsec_unregister_mdio(struct netsec_priv *priv) 1901 { 1902 struct phy_device *phydev = priv->phydev; 1903 1904 if (!dev_of_node(priv->dev) && phydev) { 1905 phy_device_remove(phydev); 1906 phy_device_free(phydev); 1907 } 1908 1909 mdiobus_unregister(priv->mii_bus); 1910 } 1911 1912 static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) 1913 { 1914 struct mii_bus *bus; 1915 int ret; 1916 1917 bus = devm_mdiobus_alloc(priv->dev); 1918 if (!bus) 1919 return -ENOMEM; 1920 1921 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev)); 1922 bus->priv = priv; 1923 bus->name = "SNI NETSEC MDIO"; 1924 bus->read = netsec_phy_read; 1925 bus->write = netsec_phy_write; 1926 bus->parent = priv->dev; 1927 priv->mii_bus = bus; 1928 1929 if (dev_of_node(priv->dev)) { 1930 struct device_node *mdio_node, *parent = dev_of_node(priv->dev); 1931 1932 mdio_node = of_get_child_by_name(parent, "mdio"); 1933 if (mdio_node) { 1934 parent = mdio_node; 1935 } else { 1936 /* older f/w doesn't populate the mdio subnode, 1937 * allow relaxed upgrade of f/w in due time. 1938 */ 1939 dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n"); 1940 } 1941 1942 ret = of_mdiobus_register(bus, parent); 1943 of_node_put(mdio_node); 1944 1945 if (ret) { 1946 dev_err(priv->dev, "mdiobus register err(%d)\n", ret); 1947 return ret; 1948 } 1949 } else { 1950 /* Mask out all PHYs from auto probing. */ 1951 bus->phy_mask = ~0; 1952 ret = mdiobus_register(bus); 1953 if (ret) { 1954 dev_err(priv->dev, "mdiobus register err(%d)\n", ret); 1955 return ret; 1956 } 1957 1958 priv->phydev = get_phy_device(bus, phy_addr, false); 1959 if (IS_ERR(priv->phydev)) { 1960 ret = PTR_ERR(priv->phydev); 1961 dev_err(priv->dev, "get_phy_device err(%d)\n", ret); 1962 priv->phydev = NULL; 1963 return -ENODEV; 1964 } 1965 1966 ret = phy_device_register(priv->phydev); 1967 if (ret) { 1968 mdiobus_unregister(bus); 1969 dev_err(priv->dev, 1970 "phy_device_register err(%d)\n", ret); 1971 } 1972 } 1973 1974 return ret; 1975 } 1976 1977 static int netsec_probe(struct platform_device *pdev) 1978 { 1979 struct resource *mmio_res, *eeprom_res, *irq_res; 1980 u8 *mac, macbuf[ETH_ALEN]; 1981 struct netsec_priv *priv; 1982 u32 hw_ver, phy_addr = 0; 1983 struct net_device *ndev; 1984 int ret; 1985 1986 mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1987 if (!mmio_res) { 1988 dev_err(&pdev->dev, "No MMIO resource found.\n"); 1989 return -ENODEV; 1990 } 1991 1992 eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1993 if (!eeprom_res) { 1994 dev_info(&pdev->dev, "No EEPROM resource found.\n"); 1995 return -ENODEV; 1996 } 1997 1998 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1999 if (!irq_res) { 2000 dev_err(&pdev->dev, "No IRQ resource found.\n"); 2001 return -ENODEV; 2002 } 2003 2004 ndev = alloc_etherdev(sizeof(*priv)); 2005 if (!ndev) 2006 return -ENOMEM; 2007 2008 priv = netdev_priv(ndev); 2009 2010 spin_lock_init(&priv->reglock); 2011 SET_NETDEV_DEV(ndev, &pdev->dev); 2012 platform_set_drvdata(pdev, priv); 2013 ndev->irq = irq_res->start; 2014 priv->dev = &pdev->dev; 2015 priv->ndev = ndev; 2016 2017 priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | 2018 NETIF_MSG_LINK | NETIF_MSG_PROBE; 2019 2020 priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, 2021 resource_size(mmio_res)); 2022 if (!priv->ioaddr) { 2023 dev_err(&pdev->dev, "devm_ioremap() failed\n"); 2024 ret = -ENXIO; 2025 goto free_ndev; 2026 } 2027 2028 priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, 2029 resource_size(eeprom_res)); 2030 if (!priv->eeprom_base) { 2031 dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n"); 2032 ret = -ENXIO; 2033 goto free_ndev; 2034 } 2035 2036 mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf)); 2037 if (mac) 2038 ether_addr_copy(ndev->dev_addr, mac); 2039 2040 if (priv->eeprom_base && 2041 (!mac || !is_valid_ether_addr(ndev->dev_addr))) { 2042 void __iomem *macp = priv->eeprom_base + 2043 NETSEC_EEPROM_MAC_ADDRESS; 2044 2045 ndev->dev_addr[0] = readb(macp + 3); 2046 ndev->dev_addr[1] = readb(macp + 2); 2047 ndev->dev_addr[2] = readb(macp + 1); 2048 ndev->dev_addr[3] = readb(macp + 0); 2049 ndev->dev_addr[4] = readb(macp + 7); 2050 ndev->dev_addr[5] = readb(macp + 6); 2051 } 2052 2053 if (!is_valid_ether_addr(ndev->dev_addr)) { 2054 dev_warn(&pdev->dev, "No MAC address found, using random\n"); 2055 eth_hw_addr_random(ndev); 2056 } 2057 2058 if (dev_of_node(&pdev->dev)) 2059 ret = netsec_of_probe(pdev, priv, &phy_addr); 2060 else 2061 ret = netsec_acpi_probe(pdev, priv, &phy_addr); 2062 if (ret) 2063 goto free_ndev; 2064 2065 priv->phy_addr = phy_addr; 2066 2067 if (!priv->freq) { 2068 dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); 2069 ret = -ENODEV; 2070 goto free_ndev; 2071 } 2072 2073 /* default for throughput */ 2074 priv->et_coalesce.rx_coalesce_usecs = 500; 2075 priv->et_coalesce.rx_max_coalesced_frames = 8; 2076 priv->et_coalesce.tx_coalesce_usecs = 500; 2077 priv->et_coalesce.tx_max_coalesced_frames = 8; 2078 2079 ret = device_property_read_u32(&pdev->dev, "max-frame-size", 2080 &ndev->max_mtu); 2081 if (ret < 0) 2082 ndev->max_mtu = ETH_DATA_LEN; 2083 2084 /* runtime_pm coverage just for probe, open/close also cover it */ 2085 pm_runtime_enable(&pdev->dev); 2086 pm_runtime_get_sync(&pdev->dev); 2087 2088 hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER); 2089 /* this driver only supports F_TAIKI style NETSEC */ 2090 if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) != 2091 NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) { 2092 ret = -ENODEV; 2093 goto pm_disable; 2094 } 2095 2096 dev_info(&pdev->dev, "hardware revision %d.%d\n", 2097 hw_ver >> 16, hw_ver & 0xffff); 2098 2099 netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT); 2100 2101 ndev->netdev_ops = &netsec_netdev_ops; 2102 ndev->ethtool_ops = &netsec_ethtool_ops; 2103 2104 ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO | 2105 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2106 ndev->hw_features = ndev->features; 2107 2108 priv->rx_cksum_offload_flag = true; 2109 2110 ret = netsec_register_mdio(priv, phy_addr); 2111 if (ret) 2112 goto unreg_napi; 2113 2114 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) 2115 dev_warn(&pdev->dev, "Failed to set DMA mask\n"); 2116 2117 ret = register_netdev(ndev); 2118 if (ret) { 2119 netif_err(priv, probe, ndev, "register_netdev() failed\n"); 2120 goto unreg_mii; 2121 } 2122 2123 pm_runtime_put_sync(&pdev->dev); 2124 return 0; 2125 2126 unreg_mii: 2127 netsec_unregister_mdio(priv); 2128 unreg_napi: 2129 netif_napi_del(&priv->napi); 2130 pm_disable: 2131 pm_runtime_put_sync(&pdev->dev); 2132 pm_runtime_disable(&pdev->dev); 2133 free_ndev: 2134 free_netdev(ndev); 2135 dev_err(&pdev->dev, "init failed\n"); 2136 2137 return ret; 2138 } 2139 2140 static int netsec_remove(struct platform_device *pdev) 2141 { 2142 struct netsec_priv *priv = platform_get_drvdata(pdev); 2143 2144 unregister_netdev(priv->ndev); 2145 2146 netsec_unregister_mdio(priv); 2147 2148 netif_napi_del(&priv->napi); 2149 2150 pm_runtime_disable(&pdev->dev); 2151 free_netdev(priv->ndev); 2152 2153 return 0; 2154 } 2155 2156 #ifdef CONFIG_PM 2157 static int netsec_runtime_suspend(struct device *dev) 2158 { 2159 struct netsec_priv *priv = dev_get_drvdata(dev); 2160 2161 netsec_write(priv, NETSEC_REG_CLK_EN, 0); 2162 2163 clk_disable_unprepare(priv->clk); 2164 2165 return 0; 2166 } 2167 2168 static int netsec_runtime_resume(struct device *dev) 2169 { 2170 struct netsec_priv *priv = dev_get_drvdata(dev); 2171 2172 clk_prepare_enable(priv->clk); 2173 2174 netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D | 2175 NETSEC_CLK_EN_REG_DOM_C | 2176 NETSEC_CLK_EN_REG_DOM_G); 2177 return 0; 2178 } 2179 #endif 2180 2181 static const struct dev_pm_ops netsec_pm_ops = { 2182 SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL) 2183 }; 2184 2185 static const struct of_device_id netsec_dt_ids[] = { 2186 { .compatible = "socionext,synquacer-netsec" }, 2187 { } 2188 }; 2189 MODULE_DEVICE_TABLE(of, netsec_dt_ids); 2190 2191 #ifdef CONFIG_ACPI 2192 static const struct acpi_device_id netsec_acpi_ids[] = { 2193 { "SCX0001" }, 2194 { } 2195 }; 2196 MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); 2197 #endif 2198 2199 static struct platform_driver netsec_driver = { 2200 .probe = netsec_probe, 2201 .remove = netsec_remove, 2202 .driver = { 2203 .name = "netsec", 2204 .pm = &netsec_pm_ops, 2205 .of_match_table = netsec_dt_ids, 2206 .acpi_match_table = ACPI_PTR(netsec_acpi_ids), 2207 }, 2208 }; 2209 module_platform_driver(netsec_driver); 2210 2211 MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>"); 2212 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 2213 MODULE_DESCRIPTION("NETSEC Ethernet driver"); 2214 MODULE_LICENSE("GPL"); 2215