// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the dma.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"
#include "stmmac.h"

static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
	int i;

	pr_info("dwmac4: Master AXI performs %s burst length\n",
		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set).
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

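/* Per-channel descriptor ring setup below: the Rx/Tx init helpers program
 * the programmable burst length (falling back to the common dma_cfg->pbl
 * when no Rx/Tx specific value is given) and write the descriptor base
 * address. The upper 32 bits of the base address are written only when
 * extended addressing (dma_cfg->eame) is enabled and the kernel is built
 * with 64-bit DMA addresses; otherwise only the low 32 bits are programmed.
 */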
static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_rx_phy, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_rx_phy),
		       ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, chan));

	writel(lower_32_bits(dma_rx_phy),
	       ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, chan));
}

static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_tx_phy, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);

	/* Enable OSP to get best performance */
	value |= DMA_CONTROL_OSP;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_tx_phy),
		       ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, chan));

	writel(lower_32_bits(dma_tx_phy),
	       ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, chan));
}

static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
	if (dma_cfg->pblx8)
		value = value | DMA_BUS_MODE_PBL;
	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK,
	       ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}

static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
				      void __iomem *ioaddr,
				      struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
	if (dma_cfg->pblx8)
		value = value | DMA_BUS_MODE_PBL;

	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
	       ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}

static void dwmac4_dma_init(void __iomem *ioaddr,
			    struct stmmac_dma_cfg *dma_cfg, int atds)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_SYS_BUS_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_SYS_BUS_MB;

	if (dma_cfg->aal)
		value |= DMA_SYS_BUS_AAL;

	if (dma_cfg->eame)
		value |= DMA_SYS_BUS_EAME;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);

	value = readl(ioaddr + DMA_BUS_MODE);

	if (dma_cfg->multi_msi_en) {
		value &= ~DMA_BUS_MODE_INTM_MASK;
		value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
	}

	if (dma_cfg->dche)
		value |= DMA_BUS_MODE_DCHE;

	writel(value, ioaddr + DMA_BUS_MODE);
}

static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
				  void __iomem *ioaddr, u32 channel,
				  u32 *reg_space)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	const struct dwmac4_addrs *default_addrs = NULL;

	/* Purposely save the registers in the "normal" layout, regardless of
	 * platform modifications, to keep reg_space size constant
	 */
	reg_space[DMA_CHAN_CONTROL(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_CONTROL(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_END_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_RING_LEN(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_RING_LEN(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_INTR_ENA(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_WATCHDOG(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_TX_DESC(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, channel));
}

static void dwmac4_dump_dma_regs(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 *reg_space)
{
	int i;

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		_dwmac4_dump_dma_regs(priv, ioaddr, i, reg_space);
}

static void dwmac4_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
			       u32 riwt, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;

	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, queue));
}

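/* MTL Rx operation mode programming below: RQS (Rx queue size) is encoded
 * in 256-byte units minus one, e.g. a 4096-byte per-queue FIFO gives
 * rqs = 4096 / 256 - 1 = 15. The flow-control thresholds (RFA/RFD) are only
 * programmed when the queue has at least 4 KiB of FIFO and is not an AVB
 * queue.
 */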
static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
				       void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	unsigned int rqs = fifosz / 256 - 1;
	u32 mtl_rx_op;

	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		mtl_rx_op |= MTL_OP_MODE_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		mtl_rx_op &= ~MTL_OP_MODE_RSF;
		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
		if (mode <= 32)
			mtl_rx_op |= MTL_OP_MODE_RTC_32;
		else if (mode <= 64)
			mtl_rx_op |= MTL_OP_MODE_RTC_64;
		else if (mode <= 96)
			mtl_rx_op |= MTL_OP_MODE_RTC_96;
		else
			mtl_rx_op |= MTL_OP_MODE_RTC_128;
	}

	mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;

	/* Enable flow control only if each channel gets 4 KiB or more FIFO and
	 * only if channel is not an AVB channel.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		unsigned int rfd, rfa;

		mtl_rx_op |= MTL_OP_MODE_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of the FIFO
			 * size limit, therefore overflow may still occur in
			 * spite of this setting.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
		mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;

		mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
		mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
	}

	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
}

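/* MTL Tx operation mode programming below: as on the Rx side, TQS is
 * encoded in 256-byte units minus one (e.g. fifosz = 8192 gives tqs = 31),
 * and the TTC_* thresholds only take effect when store-and-forward (TSF) is
 * disabled.
 */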
static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
				       void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
							   channel));
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		mtl_tx_op |= MTL_OP_MODE_TSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		mtl_tx_op &= ~MTL_OP_MODE_TSF;
		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			mtl_tx_op |= MTL_OP_MODE_TTC_32;
		else if (mode <= 64)
			mtl_tx_op |= MTL_OP_MODE_TTC_64;
		else if (mode <= 96)
			mtl_tx_op |= MTL_OP_MODE_TTC_96;
		else if (mode <= 128)
			mtl_tx_op |= MTL_OP_MODE_TTC_128;
		else if (mode <= 192)
			mtl_tx_op |= MTL_OP_MODE_TTC_192;
		else if (mode <= 256)
			mtl_tx_op |= MTL_OP_MODE_TTC_256;
		else if (mode <= 384)
			mtl_tx_op |= MTL_OP_MODE_TTC_384;
		else
			mtl_tx_op |= MTL_OP_MODE_TTC_512;
	}
	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
	 * with reset values: TXQEN off, TQS 256 bytes.
	 *
	 * TXQEN must be written for multi-channel operation and TQS must
	 * reflect the available fifo size per queue (total fifo size / number
	 * of enabled queues).
	 */
	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
	mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
	mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}

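/* Decode the GMAC_HW_FEATURE0..3 capability registers into struct
 * dma_features. Each assignment below masks one capability field and shifts
 * it down to bit 0; the shift amounts correspond to the bit positions of
 * the GMAC_HW_FEAT_* masks defined in dwmac4.h.
 */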
static int dwmac4_get_hw_feature(void __iomem *ioaddr,
				 struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

	/* MAC HW feature0 */
	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
	dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
	/* MMC */
	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
	dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
	dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;

	/* MAC HW feature1 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
	dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;

	dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
	 * shifting and store the sizes in bytes.
	 */
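	/* For example, an encoded value of 7 decodes to 128 << 7 = 16384
	 * bytes (a 16 KiB FIFO).
	 */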
	dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
	/* MAC HW feature2 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
	/* TX and RX number of channels */
	dma_cap->number_rx_channel =
		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_channel =
		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
	/* TX and RX number of queues */
	dma_cap->number_rx_queues =
		((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
	/* PPS output */
	dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;

	/* IEEE 1588-2002 */
	dma_cap->time_stamp = 0;
	/* Number of Auxiliary Snapshot Inputs */
	dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28;

	/* MAC HW feature3 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);

	/* 5.10 Features */
	dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
	dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
	dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
	dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
	dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
	dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
	dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
	dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
	dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;

	return 0;
}

/* Enable/disable TSO feature and set MSS */
static void dwmac4_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
			      bool en, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;

	if (en) {
		/* enable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
		writel(value | DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
	} else {
		/* disable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
		writel(value & ~DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
	}
}

static void dwmac4_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
			 u32 channel, u8 qmode)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
							   channel));

	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}

static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
			      int bfsize, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));

	value &= ~DMA_RBSZ_MASK;
	value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;

	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}

static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
			      bool en, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value = readl(ioaddr + GMAC_EXT_CONFIG);

	value &= ~GMAC_CONFIG_HDSMS;
	value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
	writel(value, ioaddr + GMAC_EXT_CONFIG);

	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
	if (en)
		value |= DMA_CONTROL_SPH;
	else
		value &= ~DMA_CONTROL_SPH;
	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
}

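/* Enable/disable TBS (time based scheduling) on a Tx channel. EDSE is
 * written and then read back; if the bit does not stick (e.g. on a core
 * synthesized without TBS support), -EIO is returned to signal that TBS is
 * unavailable on this channel. Otherwise the default fetch time offset is
 * programmed into DMA_TBS_CTRL.
 */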
static int dwmac4_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
			     bool en, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));

	if (en)
		value |= DMA_CONTROL_EDSE;
	else
		value &= ~DMA_CONTROL_EDSE;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs,
						   chan)) & DMA_CONTROL_EDSE;
	if (en && !value)
		return -EIO;

	writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
	return 0;
}

const struct stmmac_dma_ops dwmac4_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac4_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac410_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac410_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
	.enable_tbs = dwmac4_enable_tbs,
};