// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the DMA.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"

static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
	int i;

	pr_info("dwmac4: Master AXI performs %s burst length\n",
		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set).
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_rx_phy, u32 chan)
{
	u32 value;
	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_rx_phy),
		       ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));

	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_tx_phy, u32 chan)
{
	u32 value;
	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);

	/* Enable OSP to get best performance */
	value |= DMA_CONTROL_OSP;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_tx_phy),
		       ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));

	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
	if (dma_cfg->pblx8)
		value = value | DMA_BUS_MODE_PBL;
	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK,
	       ioaddr + DMA_CHAN_INTR_ENA(chan));
}
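
/* Note: the dwmac410 variant below is identical apart from the default
 * interrupt mask; on core 4.10 the NIE/AIE interrupt summary enable bits
 * sit at different positions, hence DMA_CHAN_INTR_DEFAULT_MASK_4_10.
 */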

static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
				      void __iomem *ioaddr,
				      struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
	if (dma_cfg->pblx8)
		value = value | DMA_BUS_MODE_PBL;

	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
	       ioaddr + DMA_CHAN_INTR_ENA(chan));
}

static void dwmac4_dma_init(void __iomem *ioaddr,
			    struct stmmac_dma_cfg *dma_cfg, int atds)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_SYS_BUS_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_SYS_BUS_MB;

	if (dma_cfg->aal)
		value |= DMA_SYS_BUS_AAL;

	if (dma_cfg->eame)
		value |= DMA_SYS_BUS_EAME;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);

	value = readl(ioaddr + DMA_BUS_MODE);

	if (dma_cfg->multi_msi_en) {
		value &= ~DMA_BUS_MODE_INTM_MASK;
		value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
	}

	if (dma_cfg->dche)
		value |= DMA_BUS_MODE_DCHE;

	writel(value, ioaddr + DMA_BUS_MODE);
}

static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
				  void __iomem *ioaddr, u32 channel,
				  u32 *reg_space)
{
	reg_space[DMA_CHAN_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CONTROL(channel));
	reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
	reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
	reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
	reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
	reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
	reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
	reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
	reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
	reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
		readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
	reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
	reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
	reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
	reg_space[DMA_CHAN_STATUS(channel) / 4] =
		readl(ioaddr + DMA_CHAN_STATUS(channel));
}

static void dwmac4_dump_dma_regs(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 *reg_space)
{
	int i;

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		_dwmac4_dump_dma_regs(priv, ioaddr, i, reg_space);
}
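
/* RIWT below is the RX interrupt watchdog count: a non-zero value makes the
 * DMA hold off the RI interrupt after a frame completes, which is how stmmac
 * implements RX interrupt coalescing. The conversion from microseconds to
 * watchdog units happens elsewhere in the driver.
 */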
static void dwmac4_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
			       u32 riwt, u32 queue)
{
	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}

static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
				       void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	unsigned int rqs = fifosz / 256 - 1;
	u32 mtl_rx_op;

	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		mtl_rx_op |= MTL_OP_MODE_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		mtl_rx_op &= ~MTL_OP_MODE_RSF;
		mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
		if (mode <= 32)
			mtl_rx_op |= MTL_OP_MODE_RTC_32;
		else if (mode <= 64)
			mtl_rx_op |= MTL_OP_MODE_RTC_64;
		else if (mode <= 96)
			mtl_rx_op |= MTL_OP_MODE_RTC_96;
		else
			mtl_rx_op |= MTL_OP_MODE_RTC_128;
	}

	mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;

	/* Enable flow control only if each channel gets 4 KiB or more FIFO and
	 * only if the channel is not an AVB channel.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		unsigned int rfd, rfa;

		mtl_rx_op |= MTL_OP_MODE_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
		mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;

		mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
		mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
	}

	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
}

static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
				       void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		mtl_tx_op |= MTL_OP_MODE_TSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		mtl_tx_op &= ~MTL_OP_MODE_TSF;
		mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			mtl_tx_op |= MTL_OP_MODE_TTC_32;
		else if (mode <= 64)
			mtl_tx_op |= MTL_OP_MODE_TTC_64;
		else if (mode <= 96)
			mtl_tx_op |= MTL_OP_MODE_TTC_96;
		else if (mode <= 128)
			mtl_tx_op |= MTL_OP_MODE_TTC_128;
		else if (mode <= 192)
			mtl_tx_op |= MTL_OP_MODE_TTC_192;
		else if (mode <= 256)
			mtl_tx_op |= MTL_OP_MODE_TTC_256;
		else if (mode <= 384)
			mtl_tx_op |= MTL_OP_MODE_TTC_384;
		else
			mtl_tx_op |= MTL_OP_MODE_TTC_512;
	}
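	/* Example: with an 8 KiB per-queue FIFO, tqs = 8192 / 256 - 1 = 31,
	 * i.e. TQS (and RQS in the RX path above) is programmed in units of
	 * 256 bytes, minus one.
	 */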
	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
	 * with reset values: TXQEN off, TQS 256 bytes.
	 *
	 * TXQEN must be written for multi-channel operation and TQS must
	 * reflect the available fifo size per queue (total fifo size / number
	 * of enabled queues).
	 */
	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
	mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
	mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}

static int dwmac4_get_hw_feature(void __iomem *ioaddr,
				 struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

	/* MAC HW feature0 */
	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
	dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
	/* MMC */
	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
	dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
	dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;

	/* MAC HW feature1 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
	dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;

	dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}
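
	/* Note: addr64 is first read as the raw two-bit ADDR64 field and then
	 * rewritten in place as an address width in bits, which is the form
	 * the rest of the driver consumes (e.g. for the DMA mask).
	 */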

	/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
	 * shifting and store the sizes in bytes.
	 */
	dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);

	/* MAC HW feature2 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
	/* TX and RX number of channels */
	dma_cap->number_rx_channel =
		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_channel =
		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
	/* TX and RX number of queues */
	dma_cap->number_rx_queues =
		((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
	/* PPS output */
	dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;

	/* IEEE 1588-2002 */
	dma_cap->time_stamp = 0;
	/* Number of Auxiliary Snapshot Inputs */
	dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28;

	/* MAC HW feature3 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);

	/* 5.10 Features */
	dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
	dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
	dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
	dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
	dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
	dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
	dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
	dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
	dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;

	return 0;
}

/* Enable/disable TSO feature and set MSS */
static void dwmac4_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
			      bool en, u32 chan)
{
	u32 value;

	if (en) {
		/* enable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value | DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	} else {
		/* disable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value & ~DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	}
}

static void dwmac4_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
			 u32 channel, u8 qmode)
{
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));

	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}

static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
			      int bfsize, u32 chan)
{
	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));

	value &= ~DMA_RBSZ_MASK;
	value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;

	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
}

static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
			      bool en, u32 chan)
{
	u32 value = readl(ioaddr + GMAC_EXT_CONFIG);

	value &= ~GMAC_CONFIG_HDSMS;
	value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
	writel(value, ioaddr + GMAC_EXT_CONFIG);

	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
	if (en)
		value |= DMA_CONTROL_SPH;
	else
		value &= ~DMA_CONTROL_SPH;
	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
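
/* TBS (time-based scheduling, used for the tc-etf offload) relies on the
 * enhanced descriptor format, so enabling it sets EDSE and reads the bit
 * back: a core synthesized without TBS support leaves it clear, which is
 * reported as -EIO.
 */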
static int dwmac4_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
			     bool en, u32 chan)
{
	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));

	if (en)
		value |= DMA_CONTROL_EDSE;
	else
		value &= ~DMA_CONTROL_EDSE;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
	if (en && !value)
		return -EIO;

	writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
	return 0;
}

const struct stmmac_dma_ops dwmac4_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac4_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac410_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac410_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
	.enable_tbs = dwmac4_enable_tbs,
};
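
/* Note: which ops table is used is decided at probe time elsewhere in the
 * driver (hwif.c in mainline), based on the Synopsys core version register;
 * the 4.10 table above differs only in the 4.10-aware channel init and IRQ
 * helpers and in wiring up TBS.
 */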