// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys DesignWare Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type,
 * clocking/reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus,
 *    an AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks,
 *    and supports a single RGMII PHY. This configuration also has SW control
 *    over all clock and reset signals to the HW block.
 */

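/*
 * Illustrative sketch only (not part of this driver): if a second
 * configuration were added, the per-configuration hooks could be collected
 * in struct eqos_config and dispatched through it, e.g.:
 *
 *	struct eqos_config {
 *		bool reg_access_always_ok;
 *		int (*start_clks)(struct udevice *dev);
 *		void (*stop_clks)(struct udevice *dev);
 *		int (*start_resets)(struct udevice *dev);
 *		...
 *	};
 *
 *	static const struct eqos_config eqos_tegra186_config = {
 *		.reg_access_always_ok = false,
 *		.start_clks = eqos_start_clks_tegra186,
 *		...
 *	};
 *
 * with each eqos_ids[] entry's .data pointing at the matching eqos_config,
 * exactly as the existing .reg_access_always_ok field is looked up today.
 */
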
#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, so descriptors written by the hardware may be
 * discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY, which causes
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device, those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraint: they are allocated with cache-line
 * alignment (EQOS_BUFFER_ALIGN), and EQOS_MAX_PACKET_SIZE is rounded up to
 * a cache-line multiple, so the buffers never share cache-lines with
 * anything else.
 */

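/*
 * Worked example (illustrative, assuming a 64-byte cache-line, a common
 * ARM64 value): a single cache-line holds four 16-byte descriptors.
 * Flushing the line to requeue RX descriptor 0 also writes back the CPU's
 * stale copies of descriptors 1-3; if the DMA engine had just updated
 * descriptor 1 in memory, that update is destroyed. With
 * CONFIG_SYS_NONCACHED_MEMORY the descriptors live in an uncached mapping,
 * so no flush is needed and the problem cannot occur.
 */
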
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_inval_buffer(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_CR_20_35 <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(10);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_CR_20_35 <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(10);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

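/*
 * Usage sketch (illustrative): once the bus is registered in eqos_probe(),
 * the generic PHY code issues transactions such as:
 *
 *	val = eqos->mii->read(eqos->mii, 0, MDIO_DEVAD_NONE, MII_BMSR);
 *	eqos->mii->write(eqos->mii, 0, MDIO_DEVAD_NONE, MII_BMCR,
 *			 BMCR_ANENABLE);
 *
 * Each call builds a command in the MAC_MDIO_Address register (PHY address
 * in PA, register number in RDA, clock range in CR, read/write in GOC) and
 * sets GB; the MAC clears GB when the clause-22 frame completes.
 */
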
static int eqos_start_clks_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);

	debug("%s: OK\n", __func__);
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
		       ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

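/*
 * For reference: the rates above follow the RGMII clocking convention, in
 * which the TX clock runs at 125 MHz, 25 MHz, or 2.5 MHz for 1000, 100, or
 * 10 Mbps respectively. The 2.5 * 1000 * 1000 expression is evaluated in
 * floating point and converts exactly to 2500000.
 */
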
static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos_calibrate_pads_tegra186(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads_tegra186() failed: %d",
			       ret);
			return ret;
		}
	} else {
		ret = eqos_disable_calibration_tegra186(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration_tegra186() failed: %d",
			       ret);
			return ret;
		}
	}

	ret = eqos_set_tx_clk_speed_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed_tegra186() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

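/*
 * Byte-layout example (illustrative): for MAC address 02:11:22:33:44:55,
 * plat->enetaddr[] = {0x02, 0x11, 0x22, 0x33, 0x44, 0x55}, so the function
 * above writes address0_high = 0x00005544 and address0_low = 0x33221102;
 * the first octet on the wire (0x02) sits in the low byte of address0_low.
 */
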
static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos_start_clks_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks_tegra186() failed: %d", ret);
		goto err;
	}

	ret = eqos_start_resets_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets_tegra186() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false, 10, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos_calibrate_pads_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads_tegra186() failed: %d", ret);
		goto err_stop_resets;
	}

	/* Program the microsecond tick reference: one tick per 1us of the
	 * CSR clock.
	 */
	rate = eqos_get_tick_clk_rate_tegra186(dev);
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	eqos->phy = phy_connect(eqos->mii, 0, dev, 0);
	if (!eqos->phy) {
		pr_err("phy_connect() failed");
		ret = -ENODEV;
		goto err_stop_resets;
	}
	ret = phy_config(eqos->phy);
	if (ret < 0) {
		pr_err("phy_config() failed: %d", ret);
		goto err_shutdown_phy;
	}
	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

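	/*
	 * Worked example (illustrative): if hw_feature1 reports
	 * tx_fifo_sz = 7, the TX FIFO holds 128 << 7 = 16384 bytes, and
	 * tqs = (16384 / 256) - 1 = 63. The same arithmetic applies to
	 * rx_fifo_sz and rqs.
	 */
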
	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set the threshold for activating flow control so that
		 * there is room for at least two frames, and the threshold
		 * for deactivating flow control once there is room for at
		 * least one frame (frame size 1500 bytes) in the receive
		 * FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of the
			 * FIFO size limit; overflow may therefore occur in
			 * spite of this setting.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == n * 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;

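	/*
	 * Worked example (illustrative): with tqs = 63 (a 16384-byte TX
	 * FIFO), tqs + 1 = 64 exceeds the cap, so pbl is clamped to 32. The
	 * maximum burst is then 32 * 128 = 4096 bytes, under half of the
	 * 16384-byte FIFO as required.
	 */
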
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 |= EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	}
	flush_cache((unsigned long)eqos->descs, EQOS_DESCRIPTORS_SIZE);

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at
	 * the first descriptor, implying all descriptors were available.
	 * However, that's not distinguishable from none of the descriptors
	 * being available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

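	/*
	 * Illustrative: with 4 RX descriptors, pointing the tail at
	 * descriptor 3 leaves descriptors 0-2 available to the DMA engine,
	 * which treats the tail as the end of the valid region. Pointing the
	 * tail at descriptor 0 would be ambiguous, as the comment above
	 * notes: it could mean either "all four free" or "none free".
	 * eqos_free_pkt() then advances the tail one slot at a time as each
	 * buffer is requeued.
	 */
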
	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
	eqos->phy = NULL;
err_stop_resets:
	eqos_stop_resets_tegra186(dev);
err_stop_clks:
	eqos_stop_clks_tegra186(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy) {
		phy_shutdown(eqos->phy);
		eqos->phy = NULL;
	}
	eqos_stop_resets_tegra186(dev);
	eqos_stop_clks_tegra186(dev);

	debug("%s: OK\n", __func__);
}

static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;

	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos_flush_desc(tx_desc);

	writel((ulong)(tx_desc + 1), &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	/* Invalidate our copy so we observe the DMA engine's writes */
	eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos_inval_buffer(*packetp, length);

	return length;
}

static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 |= EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

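/*
 * RX ring lifecycle sketch (illustrative): the net uclass calls recv() and
 * free_pkt() in pairs, roughly:
 *
 *	len = eqos_recv(dev, 0, &packet);        // CPU now owns the buffer
 *	if (len > 0) {
 *		net_process_received_packet(packet, len);
 *		eqos_free_pkt(dev, packet, len); // hand it back to the DMA
 *	}
 *
 * free_pkt() rewrites the descriptor, sets OWN | BUF1V, flushes it, and
 * advances the ring tail so the hardware can reuse the slot.
 */
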
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos_probe_resources_tegra186(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_tegra186() failed: %d", ret);
		goto err_remove_resources_core;
	}

	eqos->mii = mdio_alloc();
	if (!eqos->mii) {
		pr_err("mdio_alloc() failed");
		ret = -ENOMEM;
		goto err_remove_resources_tegra;
	}
	eqos->mii->read = eqos_mdio_read;
	eqos->mii->write = eqos_mdio_write;
	eqos->mii->priv = eqos;
	strcpy(eqos->mii->name, dev->name);

	ret = mdio_register(eqos->mii);
	if (ret < 0) {
		pr_err("mdio_register() failed: %d", ret);
		goto err_free_mdio;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources_tegra:
	eqos_remove_resources_tegra186(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos_remove_resources_tegra186(dev);
	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = eqos_ids,
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
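
/*
 * Illustrative device tree fragment (sketch only; the node below shows the
 * resources eqos_probe_resources_tegra186() looks up by name, not an
 * authoritative binding; consult the Tegra186 binding document):
 *
 *	ethernet@2490000 {
 *		compatible = "nvidia,tegra186-eqos";
 *		reg = <...>;
 *		clocks = <...>;
 *		clock-names = "slave_bus", "master_bus", "rx", "ptp_ref",
 *			      "tx";
 *		resets = <...>;
 *		reset-names = "eqos";
 *		phy-reset-gpios = <&gpio ...>;
 *	};
 */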