// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This file contains the functions to handle the DMA.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"

static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
	int i;

	pr_info("dwmac4: Master AXI performs %s burst length\n",
		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set).
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_rx_phy, u32 chan)
{
	u32 value;
	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_rx_phy),
		       ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));

	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_tx_phy, u32 chan)
{
	u32 value;
	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);

	/* Enable OSP to get best performance */
	value |= DMA_CONTROL_OSP;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_tx_phy),
		       ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));

	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_channel(void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
	if (dma_cfg->pblx8)
		value = value | DMA_BUS_MODE_PBL;
	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK,
	       ioaddr + DMA_CHAN_INTR_ENA(chan));
}
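/* Illustrative example of the burst-length programming done by the channel
 * init helpers above (assumed behaviour, see the core databook for the exact
 * interpretation): the rxpbl/txpbl value from dma_cfg is written into the
 * per-channel RPBL/PBL fields, and dma_cfg->pblx8 sets the 8xPBL bit in the
 * channel control register. With dma_cfg->pbl = 8 and pblx8 enabled, the DMA
 * may therefore issue bursts of up to 8 * 8 = 64 beats.
 */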
static void dwmac4_dma_init(void __iomem *ioaddr,
			    struct stmmac_dma_cfg *dma_cfg, int atds)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_SYS_BUS_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_SYS_BUS_MB;

	if (dma_cfg->aal)
		value |= DMA_SYS_BUS_AAL;

	if (dma_cfg->eame)
		value |= DMA_SYS_BUS_EAME;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
				  u32 *reg_space)
{
	reg_space[DMA_CHAN_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CONTROL(channel));
	reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
	reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
	reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
	reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
	reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
	reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
	reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
	reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
	reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
		readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
	reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
	reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
	reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
	reg_space[DMA_CHAN_STATUS(channel) / 4] =
		readl(ioaddr + DMA_CHAN_STATUS(channel));
}

static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
}

static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
{
	u32 chan;

	for (chan = 0; chan < number_chan; chan++)
		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
}
static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	unsigned int rqs = fifosz / 256 - 1;
	u32 mtl_rx_op, mtl_rx_int;

	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		mtl_rx_op |= MTL_OP_MODE_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		mtl_rx_op &= ~MTL_OP_MODE_RSF;
		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
		if (mode <= 32)
			mtl_rx_op |= MTL_OP_MODE_RTC_32;
		else if (mode <= 64)
			mtl_rx_op |= MTL_OP_MODE_RTC_64;
		else if (mode <= 96)
			mtl_rx_op |= MTL_OP_MODE_RTC_96;
		else
			mtl_rx_op |= MTL_OP_MODE_RTC_128;
	}

	mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;

	/* Enable flow control only if each channel gets 4 KiB or more FIFO and
	 * only if the channel is not an AVB channel.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		unsigned int rfd, rfa;

		mtl_rx_op |= MTL_OP_MODE_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		case 8192:
			rfd = 0x06; /* Full-4K */
			rfa = 0x0a; /* Full-6K */
			break;

		case 16384:
			rfd = 0x06; /* Full-4K */
			rfa = 0x12; /* Full-10K */
			break;

		default:
			rfd = 0x06; /* Full-4K */
			rfa = 0x1e; /* Full-16K */
			break;
		}

		mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
		mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;

		mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
		mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
	}

	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	/* Enable the MTL RX overflow interrupt */
	mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
	writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
	       ioaddr + MTL_CHAN_INT_CTRL(channel));
}

static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		mtl_tx_op |= MTL_OP_MODE_TSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		mtl_tx_op &= ~MTL_OP_MODE_TSF;
		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			mtl_tx_op |= MTL_OP_MODE_TTC_32;
		else if (mode <= 64)
			mtl_tx_op |= MTL_OP_MODE_TTC_64;
		else if (mode <= 96)
			mtl_tx_op |= MTL_OP_MODE_TTC_96;
		else if (mode <= 128)
			mtl_tx_op |= MTL_OP_MODE_TTC_128;
		else if (mode <= 192)
			mtl_tx_op |= MTL_OP_MODE_TTC_192;
		else if (mode <= 256)
			mtl_tx_op |= MTL_OP_MODE_TTC_256;
		else if (mode <= 384)
			mtl_tx_op |= MTL_OP_MODE_TTC_384;
		else
			mtl_tx_op |= MTL_OP_MODE_TTC_512;
	}
	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
	 * with reset values: TXQEN off, TQS 256 bytes.
	 *
	 * TXQEN must be written for multi-channel operation and TQS must
	 * reflect the available fifo size per queue (total fifo size / number
	 * of enabled queues).
	 */
	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
	mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
	mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}
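/* Worked example for the queue-size fields programmed above: RQS and TQS
 * hold the per-queue FIFO size in units of 256 bytes, minus one, which is
 * what the "fifosz / 256 - 1" computation produces. A 4096-byte queue gives
 * 4096 / 256 - 1 = 15 and an 8192-byte queue gives 8192 / 256 - 1 = 31.
 * The RFA/RFD flow-control thresholds use the "FIFO full minus N" encoding
 * noted in the inline comments (e.g. 0x06 for "full minus 4 KiB").
 */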
static void dwmac4_get_hw_feature(void __iomem *ioaddr,
				  struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

	/* MAC HW feature0 */
	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
	dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
	/* MMC */
	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
	dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
	dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;

	/* MAC HW feature1 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
	dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;

	dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
	 * shifting and store the sizes in bytes.
	 */
	dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
	/* MAC HW feature2 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
	/* TX and RX number of channels */
	dma_cap->number_rx_channel =
		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_channel =
		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
	/* TX and RX number of queues */
	dma_cap->number_rx_queues =
		((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
	/* PPS output */
	dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;

	/* IEEE 1588-2002 */
	dma_cap->time_stamp = 0;

	/* MAC HW feature3 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);

	/* 5.10 Features */
	dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
	dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
	dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
	dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
	dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
}
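/* Worked example for the FIFO size decoding above: the raw field value is
 * log2(size / 128), so a value of 5 decodes to 128 << 5 = 4096 bytes and a
 * value of 7 decodes to 128 << 7 = 16384 bytes (16 KiB).
 */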
/* Enable/disable TSO feature and set MSS */
static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
{
	u32 value;

	if (en) {
		/* enable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value | DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	} else {
		/* disable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value & ~DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	}
}

static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
{
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));

	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}

static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));

	value &= ~DMA_RBSZ_MASK;
	value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;

	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
}

static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
{
	u32 value = readl(ioaddr + GMAC_EXT_CONFIG);

	value &= ~GMAC_CONFIG_HDSMS;
	value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
	writel(value, ioaddr + GMAC_EXT_CONFIG);

	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
	if (en)
		value |= DMA_CONTROL_SPH;
	else
		value &= ~DMA_CONTROL_SPH;
	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
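/* Two callback tables follow: dwmac4_dma_ops and dwmac410_dma_ops. As the
 * initializers below show, they are identical except for the enable_dma_irq
 * hook (dwmac4_enable_dma_irq vs. dwmac410_enable_dma_irq); the stmmac core
 * is expected to pick one or the other based on the detected IP version,
 * with the dwmac410 table targeting 4.10 and later cores.
 */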
const struct stmmac_dma_ops dwmac4_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac4_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac410_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
};